#ifndef _ARCH_ATOMIC_X86_H
#define _ARCH_ATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009      Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)

#ifndef _INCLUDE_API_H

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __atomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __atomic_dummy *)(x))

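/*
 * The __hp() cast presumably widens the memory operand seen by the "m"
 * constraints below, so the compiler assumes the inline assembly may access
 * the whole object behind the pointer rather than a single byte.
 */
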
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;
		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;
		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;
		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;
		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define cmpxchg(addr, old, _new)					      \
	((__typeof__(*(addr))) _atomic_cmpxchg((addr), (unsigned long)(old), \
						(unsigned long)(_new),	      \
						sizeof(*(addr))))

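/*
 * Usage sketch; "flag" and "try_set_flag" are example names only.
 * cmpxchg() returns the value previously stored at the address, so success
 * is detected by comparing the return value against the expected old value.
 *
 *	static unsigned long flag;
 *
 *	static int try_set_flag(void)
 *	{
 *		// Non-zero when this caller changed flag from 0 to 1.
 *		return cmpxchg(&flag, 0UL, 1UL) == 0UL;
 *	}
 */
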
/* xchg */

static inline __attribute__((always_inline))
unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define xchg(addr, v)							     \
	((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))

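/*
 * Usage sketch; "current_ptr" and "publish_example" are example names only.
 * xchg() atomically stores the new value and returns the previous one, which
 * suits hand-off patterns.
 *
 *	static void *current_ptr;
 *
 *	static void *publish_example(void *newp)
 *	{
 *		// Install newp and get back whatever was installed before.
 *		return xchg(&current_ptr, newp);
 *	}
 */
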
/* atomic_add, atomic_sub */

static inline __attribute__((always_inline))
void _atomic_add(volatile void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
}

#define atomic_add(addr, v)	(_atomic_add((addr), (unsigned long)(v), \
					     sizeof(*(addr))))

#define atomic_sub(addr, v)	atomic_add((addr), -(v))

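/*
 * Usage sketch; "nr_events" and "account_example" are example names only.
 * atomic_add()/atomic_sub() update the target in place and return nothing.
 *
 *	static unsigned long nr_events;
 *
 *	static void account_example(unsigned long batch)
 *	{
 *		atomic_add(&nr_events, batch);	// concurrent-safe add
 *		atomic_sub(&nr_events, 1);	// expands to atomic_add(addr, -1)
 *	}
 */
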
/* atomic_inc */

static inline __attribute__((always_inline))
void _atomic_inc(volatile void *addr, int len)
{
	switch (len) {
	case 1:
		__asm__ __volatile__("lock; incb %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
	case 2:
		__asm__ __volatile__("lock; incw %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
	case 4:
		__asm__ __volatile__("lock; incl %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
#if (BITS_PER_LONG == 64)
	case 8:
		__asm__ __volatile__("lock; incq %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
}

#define atomic_inc(addr)	(_atomic_inc((addr), sizeof(*(addr))))

/* atomic_dec */

static inline __attribute__((always_inline))
void _atomic_dec(volatile void *addr, int len)
{
	switch (len) {
	case 1:
		__asm__ __volatile__("lock; decb %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
	case 2:
		__asm__ __volatile__("lock; decw %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
	case 4:
		__asm__ __volatile__("lock; decl %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
#if (BITS_PER_LONG == 64)
	case 8:
		__asm__ __volatile__("lock; decq %0"
			: "=m"(*__hp(addr)) : : "memory");
		return;
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
}

#define atomic_dec(addr)	(_atomic_dec((addr), sizeof(*(addr))))

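/*
 * Usage sketch; "refcount", "get_example" and "put_example" are example names
 * only. Note that atomic_dec() returns nothing, so this pattern cannot by
 * itself detect the final reference drop.
 *
 *	static unsigned long refcount = 1;
 *
 *	static void get_example(void)
 *	{
 *		atomic_inc(&refcount);
 *	}
 *
 *	static void put_example(void)
 *	{
 *		atomic_dec(&refcount);
 *	}
 */
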
#endif /* #ifndef _INCLUDE_API_H */

#endif /* _ARCH_ATOMIC_X86_H */