#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#define ILLEGAL_INSTR   ".long  0xd00d00"
/*
 * Providing sequential consistency semantics with respect to other
 * instructions for the cmpxchg and add_return families of atomic
 * primitives.
 *
 * This is achieved with:
 *      lwsync (prior stores can be reordered after following loads)
 *      lwarx
 *      stwcx.
 *      test if success (retry)
 *      sync
 *
 * Explanation of the sequential consistency provided by this scheme
 * from Paul E. McKenney:
 *
 * The reason we can get away with the lwsync before is that if a prior
 * store reorders with the lwarx, then you have to store to the atomic
 * variable from some other CPU to detect it.
 *
 * And if you do that, the lwarx will lose its reservation, so the stwcx
 * will fail. The atomic operation will retry, so that the caller won't be
 * able to see the misordering.
 */
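
/*
 * For illustration only (a hypothetical sketch, not part of this file's
 * API): the same lwsync/lwarx/stwcx./retry/sync pattern generalizes to
 * any read-modify-write primitive. A 32-bit atomic AND, for instance,
 * could be built as below; addr, mask and result are assumed local
 * names, not identifiers defined by liburcu:
 *
 *      unsigned int result;
 *
 *      __asm__ __volatile__(
 *              LWSYNC_OPCODE
 *      "1:\t"  "lwarx %0,0,%1\n"
 *              "and %0,%0,%2\n"
 *              "stwcx. %0,0,%1\n"
 *              "bne- 1b\n"
 *              "sync\n"
 *              : "=&r"(result)
 *              : "r"(addr), "r"(mask)
 *              : "memory", "cc");
 */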

/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
        switch (len) {
        case 4:
        {
                unsigned int result;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "stwcx. %2,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                        : "=&r"(result)
                        : "r"(addr), "r"(val)
                        : "memory", "cc");

                return result;
        }
#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "stdcx. %2,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                        : "=&r"(result)
                        : "r"(addr), "r"(val)
                        : "memory", "cc");

                return result;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
}

#define uatomic_xchg(addr, v)                                           \
        ((__typeof__(*(addr))) _uatomic_exchange((addr),                \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
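
/*
 * Usage sketch (hypothetical; "flag" is an assumed application
 * variable, not something defined by this header):
 *
 *      unsigned long flag = 0;
 *      unsigned long prev;
 *
 *      prev = uatomic_xchg(&flag, 1UL);
 *
 * prev receives the value flag held just before the exchange, and the
 * operation acts as a full memory barrier.
 */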

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                               unsigned long _new, int len)
{
        switch (len) {
        case 4:
        {
                unsigned int old_val;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "cmpw %0,%3\n"          /* if load is not equal to */
                        "bne 2f\n"              /* old, fail */
                        "stwcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                "2:\n"
                        : "=&r"(old_val)
                        : "r"(addr), "r"((unsigned int)_new),
                          "r"((unsigned int)old)
                        : "memory", "cc");

                return old_val;
        }
#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long old_val;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "cmpd %0,%3\n"          /* if load is not equal to */
                        "bne 2f\n"              /* old, fail */
                        "stdcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                "2:\n"
                        : "=&r"(old_val)
                        : "r"(addr), "r"((unsigned long)_new),
                          "r"((unsigned long)old)
                        : "memory", "cc");

                return old_val;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
}

#define uatomic_cmpxchg(addr, old, _new)                                \
        ((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                 \
                                                caa_cast_long_keep_sign(old), \
                                                caa_cast_long_keep_sign(_new),\
                                                sizeof(*(addr))))
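
/*
 * Usage sketch (hypothetical; "counter" is an assumed application
 * variable): a classic compare-and-swap retry loop that doubles a
 * counter atomically.
 *
 *      unsigned long old, ret;
 *
 *      do {
 *              old = CMM_LOAD_SHARED(counter);
 *              ret = uatomic_cmpxchg(&counter, old, old * 2);
 *      } while (ret != old);
 *
 * uatomic_cmpxchg() returns the value the variable held before the
 * attempt; the store only happens when that value equals old.
 */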

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
                                  int len)
{
        switch (len) {
        case 4:
        {
                unsigned int result;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "add %0,%2,%0\n"        /* add val to value loaded */
                        "stwcx. %0,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                        : "=&r"(result)
                        : "r"(addr), "r"(val)
                        : "memory", "cc");

                return result;
        }
#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "add %0,%2,%0\n"        /* add val to value loaded */
                        "stdcx. %0,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                        : "=&r"(result)
                        : "r"(addr), "r"(val)
                        : "memory", "cc");

                return result;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
}

#define uatomic_add_return(addr, v)                                     \
        ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
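
/*
 * Usage sketch (hypothetical; "refcount" is an assumed application
 * variable): dropping a reference and detecting the last release.
 *
 *      long refcount = 1;
 *      long newval;
 *
 *      newval = uatomic_add_return(&refcount, -1);
 *      if (newval == 0)
 *              release_resources();
 *
 * The returned value is the variable's content after the addition, and
 * the operation acts as a full memory barrier; release_resources() is a
 * placeholder for application cleanup code.
 */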

#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_PPC_H */