#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H
/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */
#include <urcu/compiler.h>
#include <urcu/system.h>
#ifndef __SIZEOF_LONG__
#if defined(__x86_64__) || defined(__amd64__)
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif
/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */
struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))
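/*
 * Note: __hp() casts the target address to a pointer to the dummy structure
 * above, so the "m" asm constraints below describe a memory operand large
 * enough for every access size used here; this keeps the compiler from
 * assuming that only part of the location is read or written around the
 * atomic instruction.
 */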
#define _uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
#define _uatomic_read(addr)	LOAD_SHARED(*(addr))
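/*
 * Plain atomic load/store: these map to the STORE_SHARED()/LOAD_SHARED()
 * accessors from <urcu/system.h> and do not imply any memory barrier.
 */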
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
				unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}
#define _uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	       \
						sizeof(*(addr))))
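/*
 * Illustrative sketch (not part of the original header): a typical
 * compare-and-swap retry loop built on the cmpxchg primitive, here bumping
 * a hypothetical "counter" only while it stays below a hypothetical "cap".
 *
 *	unsigned long old, newval;
 *
 *	do {
 *		old = uatomic_read(&counter);
 *		if (old >= cap)
 *			break;
 *		newval = old + 1;
 *	} while (uatomic_cmpxchg(&counter, old, newval) != old);
 */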
/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}
#define _uatomic_xchg(addr, v)						       \
	((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))
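/*
 * On x86, "xchg" with a memory operand asserts the bus lock implicitly,
 * so _uatomic_xchg() has full-barrier semantics even without a "lock"
 * prefix.
 */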
/* uatomic_add_return, uatomic_sub_return */
static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}
#define _uatomic_add_return(addr, v)					       \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		       \
						(unsigned long)(v),	       \
						sizeof(*(addr))))
#define _uatomic_sub_return(addr, v)	_uatomic_add_return((addr), -(v))
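/*
 * Subtraction is implemented as addition of the negated value; the unsigned
 * two's-complement arithmetic used above yields the expected result for
 * every operand size.
 */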
/* uatomic_add, uatomic_sub */
static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}
#define _uatomic_add(addr, v)						       \
	(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
#define _uatomic_sub(addr, v)	_uatomic_add((addr), -(v))
/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
	case 2:
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
	case 4:
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
#if (BITS_PER_LONG == 64)
	case 8:
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}
#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))
/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
	case 2:
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
	case 4:
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
#if (BITS_PER_LONG == 64)
	case 8:
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}
#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))
#if ((BITS_PER_LONG != 64) && defined(CONFIG_URCU_COMPAT_ARCH))
extern int __urcu_cas_avail;
extern int __urcu_cas_init(void);
#define UATOMIC_COMPAT(insn)						       \
	((likely(__urcu_cas_avail > 0))					       \
	? (_uatomic_##insn)						       \
	: ((unlikely(__urcu_cas_avail < 0)				       \
		? ((__urcu_cas_init() > 0)				       \
			? (_uatomic_##insn)				       \
			: (compat_uatomic_##insn))			       \
		: (compat_uatomic_##insn))))
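/*
 * __urcu_cas_avail > 0 means cmpxchg is available natively, < 0 means the
 * CPU has not been probed yet (__urcu_cas_init() performs the detection on
 * first use), and 0 means the instruction is missing, in which case the
 * compat_uatomic_*() fallbacks below are used.
 */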
extern unsigned long _compat_uatomic_set(void *addr,
					 unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)					       \
	((__typeof__(*(addr))) _compat_uatomic_set((addr),		       \
						(unsigned long)(_new),	       \
						sizeof(*(addr))))
extern unsigned long _compat_uatomic_xchg(void *addr,
					  unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)					       \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		       \
						(unsigned long)(_new),	       \
						sizeof(*(addr))))
extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					     unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		       \
						(unsigned long)(old),	       \
						(unsigned long)(_new),	       \
						sizeof(*(addr))))
extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v)				       \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	       \
						(unsigned long)(v),	       \
						sizeof(*(addr))))
#define compat_uatomic_sub_return(addr, v)				       \
		compat_uatomic_add_return((addr), -(v))
#define compat_uatomic_add(addr, v)					       \
		((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_sub(addr, v)					       \
		((void)compat_uatomic_sub_return((addr), (v)))
#define compat_uatomic_inc(addr)					       \
		(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)					       \
		(compat_uatomic_sub((addr), 1))
#else
#define UATOMIC_COMPAT(insn)	(_uatomic_##insn)
#endif
/* Read is atomic even in compat mode */
#define uatomic_read(addr)	_uatomic_read(addr)

#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))
#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))
#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))
#define uatomic_sub_return(addr, v)		\
		UATOMIC_COMPAT(sub_return(addr, v))
#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define uatomic_sub(addr, v)	UATOMIC_COMPAT(sub(addr, v))
#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))
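/*
 * Illustrative sketch (not part of the original header): the public
 * uatomic_*() macros operate on plain integer or pointer variables of 1, 2,
 * 4 or (on 64-bit) 8 bytes; "refcount" and "release()" below are
 * hypothetical names used only for the example.
 *
 *	static unsigned long refcount;
 *
 *	uatomic_set(&refcount, 1);		// initial reference
 *	uatomic_inc(&refcount);			// take another reference
 *	if (uatomic_sub_return(&refcount, 1) == 0)
 *		release();			// dropped the last reference
 */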
#endif /* _URCU_ARCH_UATOMIC_X86_H */