#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H
/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */
#include <urcu/compiler.h>
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */
struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))
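
/*
 * Note (added commentary): casting the target pointer through
 * __uatomic_dummy makes the "+m"/"=m" constraints in the asm statements
 * below describe a memory operand large enough for every supported
 * access size, so the compiler treats the whole target object as
 * read/written by the asm and does not cache it across the operation.
 */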
#define uatomic_set(addr, v)				\
do {						\
	ACCESS_ONCE(*(addr)) = (v);		\
} while (0)
#define uatomic_read(addr)	ACCESS_ONCE(*(addr))
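
/*
 * Illustrative usage sketch (hypothetical caller code, not part of the
 * original header). uatomic_set()/uatomic_read() only guarantee a
 * single, non-torn volatile access; they imply no memory barrier.
 *
 *	unsigned long count, v;
 *
 *	uatomic_set(&count, 0);
 *	v = uatomic_read(&count);
 */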
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}
#define uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	      \
						sizeof(*(addr))))
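
/*
 * Illustrative use of uatomic_cmpxchg() (hypothetical caller code, not
 * part of the original header): atomically take a lock word if free.
 * cmpxchg returns the value read from *addr; the swap happened iff the
 * return value equals the expected "old" argument.
 *
 *	static int lock_word;
 *
 *	static int trylock(void)
 *	{
 *		return uatomic_cmpxchg(&lock_word, 0, 1) == 0;
 *	}
 */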
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}
#define uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))
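
/*
 * Illustrative use of uatomic_xchg() (hypothetical caller code): pop an
 * entire lock-free stack by swapping its head with NULL. xchg returns
 * the previous value of *addr.
 *
 *	struct node { struct node *next; };
 *	static struct node *stack_head;
 *
 *	static struct node *pop_all(void)
 *	{
 *		return uatomic_xchg(&stack_head, NULL);
 *	}
 */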
/* uatomic_add_return, uatomic_sub_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				 int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}
#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						  (unsigned long)(v),	\
						  sizeof(*(addr))))

#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))
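
/*
 * Illustrative use of uatomic_sub_return() (hypothetical caller code):
 * a reference count drop that must know when it hits zero. xadd returns
 * the value *before* the addition, so _uatomic_add_return() adds "val"
 * back in to yield the post-operation value.
 *
 *	static long refcount = 1;
 *
 *	static void put(void)
 *	{
 *		if (uatomic_sub_return(&refcount, 1) == 0)
 *			free_resource();	// hypothetical cleanup
 *	}
 */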
/* uatomic_add, uatomic_sub */

static inline __attribute__((always_inline))
void _uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}
#define uatomic_add(addr, v)					\
	(_uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

#define uatomic_sub(addr, v)	uatomic_add((addr), -(v))
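
/*
 * Illustrative use of uatomic_add() (hypothetical caller code): bump a
 * statistics counter when the updated value is not needed. This form
 * emits a plain "lock; add" instead of the costlier "lock; xadd".
 *
 *	static unsigned long nr_events;
 *
 *	static void account_events(unsigned int n)
 *	{
 *		uatomic_add(&nr_events, n);
 *	}
 */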
/* uatomic_inc */

static inline __attribute__((always_inline))
void _uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}
#define uatomic_inc(addr)	(_uatomic_inc((addr), sizeof(*(addr))))
/* uatomic_dec */

static inline __attribute__((always_inline))
void _uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}
#define uatomic_dec(addr)	(_uatomic_dec((addr), sizeof(*(addr))))
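
/*
 * Illustrative use of uatomic_inc()/uatomic_dec() (hypothetical caller
 * code): track how many threads are inside a section when only the
 * increment/decrement itself must be atomic, not any read-back.
 *
 *	static unsigned long nr_active;
 *
 *	static void enter(void) { uatomic_inc(&nr_active); }
 *	static void leave(void) { uatomic_dec(&nr_active); }
 */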
#endif /* _URCU_ARCH_UATOMIC_X86_H */