#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009      Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT
/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __uatomic_dummy {
        unsigned long v[10];
};
#define __hp(x)        ((struct __uatomic_dummy *)(x))
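/*
 * Note: __hp() casts the target address to a dummy structure pointer so that
 * *addr can be used as an "m"/"+m" memory operand in the asm statements
 * below (a plain void pointer cannot be dereferenced), and so that the
 * compiler treats the object at that address as potentially read or written
 * by the instruction rather than caching it across the access.
 */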

#define _uatomic_set(addr, v)        CMM_STORE_SHARED(*(addr), (v))
static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
                                unsigned long _new, int len)
{
        switch (len) {
        case 1:
        {
                unsigned char result = old;

                __asm__ __volatile__(
                "lock; cmpxchgb %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
                        : "q"((unsigned char)_new)
                        : "memory");
                return result;
        }
        case 2:
        {
                unsigned short result = old;

                __asm__ __volatile__(
                "lock; cmpxchgw %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
                        : "r"((unsigned short)_new)
                        : "memory");
                return result;
        }
        case 4:
        {
                unsigned int result = old;

                __asm__ __volatile__(
                "lock; cmpxchgl %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
                        : "r"((unsigned int)_new)
                        : "memory");
                return result;
        }
#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result = old;

                __asm__ __volatile__(
                "lock; cmpxchgq %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
                        : "r"((unsigned long)_new)
                        : "memory");
                return result;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return 0;
}
#define _uatomic_cmpxchg(addr, old, _new)                                     \
        ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
                                                (unsigned long)(_new),        \
                                                sizeof(*(addr))))
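/*
 * _uatomic_cmpxchg() returns the value that was in *addr before the
 * operation; the swap took place iff that return value equals "old".
 * A minimal usage sketch (the "owner" variable is illustrative only):
 *
 *        static unsigned long owner;
 *
 *        if (uatomic_cmpxchg(&owner, 0UL, 1UL) == 0UL) {
 *                // this thread atomically moved owner from 0 to 1
 *        }
 */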
static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
        /* Note: the "xchg" instruction does not need a "lock" prefix. */
        switch (len) {
        case 1:
        {
                unsigned char result;
                __asm__ __volatile__(
                "xchgb %0, %1"
                        : "=q"(result), "+m"(*__hp(addr))
                        : "0" ((unsigned char)val)
                        : "memory");
                return result;
        }
        case 2:
        {
                unsigned short result;
                __asm__ __volatile__(
                "xchgw %0, %1"
                        : "=r"(result), "+m"(*__hp(addr))
                        : "0" ((unsigned short)val)
                        : "memory");
                return result;
        }
        case 4:
        {
                unsigned int result;
                __asm__ __volatile__(
                "xchgl %0, %1"
                        : "=r"(result), "+m"(*__hp(addr))
                        : "0" ((unsigned int)val)
                        : "memory");
                return result;
        }
#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;
                __asm__ __volatile__(
                "xchgq %0, %1"
                        : "=r"(result), "+m"(*__hp(addr))
                        : "0" ((unsigned long)val)
                        : "memory");
                return result;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return 0;
}
#define _uatomic_xchg(addr, v)                                                \
        ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
                                                sizeof(*(addr))))
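/*
 * _uatomic_exchange() stores the new value unconditionally and returns the
 * previous content of *addr; xchg with a memory operand is implicitly
 * locked on x86, hence the absence of a "lock" prefix above.  Illustrative
 * sketch (the "pending" variable is hypothetical):
 *
 *        unsigned long old = uatomic_xchg(&pending, 0UL);
 */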
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
                                   int len)
{
        switch (len) {
        case 1:
        {
                unsigned char result = val;

                __asm__ __volatile__(
                "lock; xaddb %1, %0"
                        : "+m"(*__hp(addr)), "+q" (result)
                        :
                        : "memory");
                return result + (unsigned char)val;
        }
        case 2:
        {
                unsigned short result = val;

                __asm__ __volatile__(
                "lock; xaddw %1, %0"
                        : "+m"(*__hp(addr)), "+r" (result)
                        :
                        : "memory");
                return result + (unsigned short)val;
        }
        case 4:
        {
                unsigned int result = val;

                __asm__ __volatile__(
                "lock; xaddl %1, %0"
                        : "+m"(*__hp(addr)), "+r" (result)
                        :
                        : "memory");
                return result + (unsigned int)val;
        }
#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result = val;

                __asm__ __volatile__(
                "lock; xaddq %1, %0"
                        : "+m"(*__hp(addr)), "+r" (result)
                        :
                        : "memory");
                return result + (unsigned long)val;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return 0;
}
#define _uatomic_add_return(addr, v)                                          \
        ((__typeof__(*(addr))) __uatomic_add_return((addr),                   \
                                                  (unsigned long)(v),         \
                                                  sizeof(*(addr))))
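/*
 * xadd leaves the pre-add value in "result", so adding "val" back yields
 * the post-add value: _uatomic_add_return() returns the new content of
 * *addr.  For example, uatomic_add_return(&count, 1) on a counter holding
 * 4 returns 5 ("count" being an illustrative variable).
 */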
static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
        switch (len) {
        case 1:
        {
                __asm__ __volatile__(
                "lock; addb %1, %0"
                        : "=m"(*__hp(addr))
                        : "iq" ((unsigned char)val)
                        : "memory");
                return;
        }
        case 2:
        {
                __asm__ __volatile__(
                "lock; addw %1, %0"
                        : "=m"(*__hp(addr))
                        : "ir" ((unsigned short)val)
                        : "memory");
                return;
        }
        case 4:
        {
                __asm__ __volatile__(
                "lock; addl %1, %0"
                        : "=m"(*__hp(addr))
                        : "ir" ((unsigned int)val)
                        : "memory");
                return;
        }
#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                __asm__ __volatile__(
                "lock; addq %1, %0"
                        : "=m"(*__hp(addr))
                        : "er" ((unsigned long)val)
                        : "memory");
                return;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return;
}
#define _uatomic_add(addr, v)                                                 \
        (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
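/*
 * _uatomic_add() discards the result; it is the cheaper choice when the
 * updated value is not needed, since "lock; add" does not have to return
 * the previous value in a register the way "lock; xadd" does.
 */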
static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
        switch (len) {
        case 1:
                __asm__ __volatile__(
                "lock; incb %0"
                        : "=m"(*__hp(addr)) : : "memory");
                return;
        case 2:
                __asm__ __volatile__(
                "lock; incw %0"
                        : "=m"(*__hp(addr)) : : "memory");
                return;
        case 4:
                __asm__ __volatile__(
                "lock; incl %0"
                        : "=m"(*__hp(addr)) : : "memory");
                return;
#if (CAA_BITS_PER_LONG == 64)
        case 8:
                __asm__ __volatile__(
                "lock; incq %0"
                        : "=m"(*__hp(addr)) : : "memory");
                return;
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return;
}
#define _uatomic_inc(addr)        (__uatomic_inc((addr), sizeof(*(addr))))
static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
        switch (len) {
        case 1:
                __asm__ __volatile__(
                "lock; decb %0"
                        : "=m"(*__hp(addr)) : : "memory");
                return;
        case 2:
                __asm__ __volatile__(
                "lock; decw %0"
                        : "=m"(*__hp(addr)) : : "memory");
                return;
        case 4:
                __asm__ __volatile__(
                "lock; decl %0"
                        : "=m"(*__hp(addr)) : : "memory");
                return;
#if (CAA_BITS_PER_LONG == 64)
        case 8:
                __asm__ __volatile__(
                "lock; decq %0"
                        : "=m"(*__hp(addr)) : : "memory");
                return;
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return;
}
#define _uatomic_dec(addr)        (__uatomic_dec((addr), sizeof(*(addr))))
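/*
 * _uatomic_inc() and _uatomic_dec() compile to a single "lock; inc"/"lock;
 * dec" and return nothing; use uatomic_add_return(addr, 1) or
 * uatomic_add_return(addr, -1) when the resulting value matters.
 */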
#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

#define UATOMIC_COMPAT(insn)                                                  \
        ((likely(__rcu_cas_avail > 0))                                        \
        ? (_uatomic_##insn)                                                   \
                : ((unlikely(__rcu_cas_avail < 0)                             \
                        ? ((__rcu_cas_init() > 0)                             \
                                ? (_uatomic_##insn)                           \
                                : (compat_uatomic_##insn))                    \
                        : (compat_uatomic_##insn))))
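/*
 * UATOMIC_COMPAT() selects the native implementation when a prior probe
 * found cmpxchg support (__rcu_cas_avail > 0), runs the probe lazily on
 * first use (__rcu_cas_avail < 0 triggers __rcu_cas_init()), and otherwise
 * falls back to the out-of-line compat_uatomic_*() helpers for CPUs that
 * lack cmpxchg (i386-class processors).
 */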
extern unsigned long _compat_uatomic_set(void *addr,
                                         unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)                                        \
        ((__typeof__(*(addr))) _compat_uatomic_set((addr),                    \
                                                (unsigned long)(_new),        \
                                                sizeof(*(addr))))

extern unsigned long _compat_uatomic_xchg(void *addr,
                                          unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)                                       \
        ((__typeof__(*(addr))) _compat_uatomic_xchg((addr),                   \
                                                (unsigned long)(_new),        \
                                                sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
                                             unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)                               \
        ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),                \
                                                (unsigned long)(old),         \
                                                (unsigned long)(_new),        \
                                                sizeof(*(addr))))
extern unsigned long _compat_uatomic_add_return(void *addr,
                                                unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v)                                    \
        ((__typeof__(*(addr))) _compat_uatomic_add_return((addr),             \
                                                (unsigned long)(v),           \
                                                sizeof(*(addr))))
#define compat_uatomic_add(addr, v)                                           \
                ((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)                                              \
                (compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)                                              \
                (compat_uatomic_add((addr), -1))
#else
#define UATOMIC_COMPAT(insn)        (_uatomic_##insn)
#endif
/* Read is atomic even in compat mode */
#define uatomic_set(addr, v)                        \
                UATOMIC_COMPAT(set(addr, v))

#define uatomic_cmpxchg(addr, old, _new)            \
                UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)                       \
                UATOMIC_COMPAT(xchg(addr, v))
#define uatomic_add_return(addr, v)                 \
                UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)        UATOMIC_COMPAT(add(addr, v))
#define uatomic_inc(addr)           UATOMIC_COMPAT(inc(addr))
#define uatomic_dec(addr)           UATOMIC_COMPAT(dec(addr))
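/*
 * Minimal usage sketch of the wrappers defined above (the "nr_readers"
 * variable and the wake_up_waiters() helper are illustrative only):
 *
 *        static unsigned long nr_readers;
 *
 *        uatomic_set(&nr_readers, 0);
 *        uatomic_inc(&nr_readers);
 *        ...
 *        if (uatomic_add_return(&nr_readers, -1) == 0)
 *                wake_up_waiters();
 */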
#include <urcu/uatomic_generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */