LOAD_SHARED and STORE_SHARED should have CMM_ prefix
[urcu.git] / urcu / uatomic_arch_x86.h
#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))
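
/*
 * __hp() casts the target address to a pointer to a large dummy structure so
 * that the memory asm constraints below refer to the pointed-to object itself
 * rather than to the pointer variable.  As far as I can tell, this is the
 * usual libatomic_ops-style trick to keep the compiler from assuming the
 * inline asm only touches a single word at that address.
 */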

#define _uatomic_set(addr, v)	CMM_STORE_SHARED(*(addr), (v))

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
				unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	      \
						sizeof(*(addr))))
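
/*
 * Illustrative use (not part of the original header): the public
 * uatomic_cmpxchg() wrapper defined at the end of this file returns the value
 * that was in memory before the operation, so comparing that return value
 * with the expected "old" tells the caller whether the swap happened.  The
 * names "flag" and "try_claim" below are made up for the example.
 *
 *	static int flag;
 *
 *	static int try_claim(void)
 *	{
 *		return uatomic_cmpxchg(&flag, 0, 1) == 0;
 *	}
 *
 * A return value of 1 means the caller observed 0 and installed 1 itself.
 */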

/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))
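
/*
 * Illustrative use (not from the original file): since the exchange returns
 * the previous value, the public uatomic_xchg() wrapper defined at the end of
 * this file can atomically detach a shared pointer, e.g. stealing a whole
 * list for local traversal.  "struct node", "head" and "steal_all" are
 * example names only.
 *
 *	struct node { struct node *next; };
 *	static struct node *head;
 *
 *	static struct node *steal_all(void)
 *	{
 *		return uatomic_xchg(&head, NULL);
 *	}
 */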

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				   int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_add_return(addr, v)					      \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		      \
						  (unsigned long)(v),	      \
						  sizeof(*(addr))))
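
/*
 * The "xadd" instruction leaves the *previous* value of the target in its
 * register operand, which is why __uatomic_add_return() returns result + val:
 * the caller gets the value after the addition.  An illustrative use (not
 * part of the original header; "nr_events" and "record_event" are example
 * names):
 *
 *	static unsigned long nr_events;
 *
 *	static unsigned long record_event(void)
 *	{
 *		return uatomic_add_return(&nr_events, 1);
 *	}
 *
 * record_event() returns the counter value including this increment.
 */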

/* uatomic_add */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_add(addr, v)						      \
	(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
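
/*
 * Unlike uatomic_add_return(), uatomic_add() discards the result, so a plain
 * "lock; add" is sufficient here; the "xadd" form above is only needed when
 * the previous or resulting value must be observed atomically.
 */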


/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))

/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))

#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

#define UATOMIC_COMPAT(insn)						      \
	((likely(__rcu_cas_avail > 0))					      \
	? (_uatomic_##insn)						      \
	: ((unlikely(__rcu_cas_avail < 0)				      \
		? ((__rcu_cas_init() > 0)				      \
			? (_uatomic_##insn)				      \
			: (compat_uatomic_##insn))			      \
		: (compat_uatomic_##insn))))
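
/*
 * __rcu_cas_avail appears to be tri-state: > 0 means cmpxchg support was
 * detected and the native _uatomic_* implementation is used; < 0 means
 * detection has not run yet, so __rcu_cas_init() is called first; 0 means
 * cmpxchg is unavailable (presumably on very old 386-class processors) and
 * the compat_uatomic_* fallbacks below are used instead.
 */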

extern unsigned long _compat_uatomic_set(void *addr,
					 unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)					      \
	((__typeof__(*(addr))) _compat_uatomic_set((addr),		      \
						(unsigned long)(_new),	      \
						sizeof(*(addr))))


extern unsigned long _compat_uatomic_xchg(void *addr,
					  unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)					      \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		      \
						(unsigned long)(_new),	      \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					     unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		      \
						(unsigned long)(old),	      \
						(unsigned long)(_new),	      \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long v, int len);
#define compat_uatomic_add_return(addr, v)				      \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	      \
						(unsigned long)(v),	      \
						sizeof(*(addr))))

#define compat_uatomic_add(addr, v)					      \
	((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)					      \
	(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)					      \
	(compat_uatomic_add((addr), -1))

#else
#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
#endif

/* Read is atomic even in compat mode */
#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))

#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))
#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))
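
/*
 * Illustrative use of the public wrappers above (not part of the original
 * header; "refcount", "ref_get" and "ref_put" are example names), e.g. a
 * simple atomic reference count:
 *
 *	static long refcount = 1;
 *
 *	static void ref_get(void)
 *	{
 *		uatomic_inc(&refcount);
 *	}
 *
 *	static int ref_put(void)
 *	{
 *		return uatomic_add_return(&refcount, -1) == 0;
 *	}
 *
 * ref_put() returns non-zero when the last reference has been dropped.
 */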

#ifdef __cplusplus
}
#endif

#include <urcu/uatomic_generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */