#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>

#ifndef __SIZEOF_LONG__
#if defined(__x86_64__) || defined(__amd64__)
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG   (__SIZEOF_LONG__ * 8)
#endif

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */
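/*
 * The __hp() cast below turns an arbitrary pointer into a pointer to a
 * structure spanning several longs, so that the "m" operands in the asm
 * statements make the compiler treat the whole addressed object as read
 * and written by the instruction, whatever the actual access width is.
 */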
struct __uatomic_dummy {
        unsigned long v[10];
};
#define __hp(x) ((struct __uatomic_dummy *)(x))

#define uatomic_set(addr, v)                            \
do {                                                    \
        ACCESS_ONCE(*(addr)) = (v);                     \
} while (0)

#define uatomic_read(addr)      ACCESS_ONCE(*(addr))
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                               unsigned long _new, int len)
{
        switch (len) {
        case 1:
        {
                unsigned char result = old;

                __asm__ __volatile__(
                        "lock; cmpxchgb %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
                        : "q"((unsigned char)_new)
                        : "memory");
                return result;
        }
        case 2:
        {
                unsigned short result = old;

                __asm__ __volatile__(
                        "lock; cmpxchgw %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
                        : "r"((unsigned short)_new)
                        : "memory");
                return result;
        }
        case 4:
        {
                unsigned int result = old;

                __asm__ __volatile__(
                        "lock; cmpxchgl %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
                        : "r"((unsigned int)_new)
                        : "memory");
                return result;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result = old;

                __asm__ __volatile__(
                        "lock; cmpxchgq %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
                        : "r"((unsigned long)_new)
                        : "memory");
                return result;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return 0;
}

#define uatomic_cmpxchg(addr, old, _new)                                      \
        ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old), \
                                                (unsigned long)(_new),        \
                                                sizeof(*(addr))))
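/*
 * Illustrative only, not part of the original header: a typical
 * compare-and-swap retry loop built on uatomic_cmpxchg(). The macro
 * returns the value actually found at the address, so the update is known
 * to have taken effect exactly when that value matches the expected one.
 * "counter" is a hypothetical shared unsigned long.
 *
 *      unsigned long old, new;
 *
 *      do {
 *              old = uatomic_read(&counter);
 *              new = old + 1;
 *      } while (uatomic_cmpxchg(&counter, old, new) != old);
 */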
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
        /* Note: the "xchg" instruction does not need a "lock" prefix. */
        switch (len) {
        case 1:
        {
                unsigned char result;
                __asm__ __volatile__(
                        "xchgb %0, %1"
                        : "=q"(result), "+m"(*__hp(addr))
                        : "0" ((unsigned char)val)
                        : "memory");
                return result;
        }
        case 2:
        {
                unsigned short result;
                __asm__ __volatile__(
                        "xchgw %0, %1"
                        : "=r"(result), "+m"(*__hp(addr))
                        : "0" ((unsigned short)val)
                        : "memory");
                return result;
        }
        case 4:
        {
                unsigned int result;
                __asm__ __volatile__(
                        "xchgl %0, %1"
                        : "=r"(result), "+m"(*__hp(addr))
                        : "0" ((unsigned int)val)
                        : "memory");
                return result;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;
                __asm__ __volatile__(
                        "xchgq %0, %1"
                        : "=r"(result), "+m"(*__hp(addr))
                        : "0" ((unsigned long)val)
                        : "memory");
                return result;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return 0;
}

#define uatomic_xchg(addr, v)                                                 \
        ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v),  \
                                                 sizeof(*(addr))))
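/*
 * Illustrative only, not part of the original header: uatomic_xchg()
 * unconditionally stores the new value and returns the previous one. For
 * example, a consumer could atomically take ownership of a hypothetical
 * single-slot mailbox pointer:
 *
 *      struct msg *m = uatomic_xchg(&mailbox, NULL);
 *
 *      if (m)
 *              consume(m);
 */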
/* uatomic_add_return, uatomic_sub_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
                                  int len)
{
        switch (len) {
        case 1:
        {
                unsigned char result = val;

                __asm__ __volatile__(
                        "lock; xaddb %1, %0"
                        : "+m"(*__hp(addr)), "+q" (result)
                        :
                        : "memory");
                return result + (unsigned char)val;
        }
        case 2:
        {
                unsigned short result = val;

                __asm__ __volatile__(
                        "lock; xaddw %1, %0"
                        : "+m"(*__hp(addr)), "+r" (result)
                        :
                        : "memory");
                return result + (unsigned short)val;
        }
        case 4:
        {
                unsigned int result = val;

                __asm__ __volatile__(
                        "lock; xaddl %1, %0"
                        : "+m"(*__hp(addr)), "+r" (result)
                        :
                        : "memory");
                return result + (unsigned int)val;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result = val;

                __asm__ __volatile__(
                        "lock; xaddq %1, %0"
                        : "+m"(*__hp(addr)), "+r" (result)
                        :
                        : "memory");
                return result + (unsigned long)val;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return 0;
}

#define uatomic_add_return(addr, v)                                           \
        ((__typeof__(*(addr))) _uatomic_add_return((addr),                    \
                                                   (unsigned long)(v),        \
                                                   sizeof(*(addr))))

#define uatomic_sub_return(addr, v)     uatomic_add_return((addr), -(v))
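/*
 * Illustrative only, not part of the original header:
 * uatomic_add_return()/uatomic_sub_return() return the value *after* the
 * update, which is the usual building block for a hypothetical reference
 * count release:
 *
 *      if (uatomic_sub_return(&obj->refcount, 1) == 0)
 *              free_object(obj);
 */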
/* uatomic_add, uatomic_sub */

static inline __attribute__((always_inline))
void _uatomic_add(void *addr, unsigned long val, int len)
{
        switch (len) {
        case 1:
        {
                __asm__ __volatile__(
                        "lock; addb %1, %0"
                        : "+m"(*__hp(addr))
                        : "iq" ((unsigned char)val)
                        : "memory");
                return;
        }
        case 2:
        {
                __asm__ __volatile__(
                        "lock; addw %1, %0"
                        : "+m"(*__hp(addr))
                        : "ir" ((unsigned short)val)
                        : "memory");
                return;
        }
        case 4:
        {
                __asm__ __volatile__(
                        "lock; addl %1, %0"
                        : "+m"(*__hp(addr))
                        : "ir" ((unsigned int)val)
                        : "memory");
                return;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                __asm__ __volatile__(
                        "lock; addq %1, %0"
                        : "+m"(*__hp(addr))
                        : "er" ((unsigned long)val)
                        : "memory");
                return;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return;
}

#define uatomic_add(addr, v)                                                  \
        (_uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

#define uatomic_sub(addr, v)    uatomic_add((addr), -(v))

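/*
 * Note: on x86, the lock-prefixed read-modify-write instructions used above
 * also act as full memory barriers, so uatomic_add()/uatomic_sub() order
 * surrounding loads and stores even though they do not return a value.
 */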
/* uatomic_inc */

static inline __attribute__((always_inline))
void _uatomic_inc(void *addr, int len)
{
        switch (len) {
        case 1:
        {
                __asm__ __volatile__(
                        "lock; incb %0"
                        : "+m"(*__hp(addr))
                        :
                        : "memory");
                return;
        }
        case 2:
        {
                __asm__ __volatile__(
                        "lock; incw %0"
                        : "+m"(*__hp(addr))
                        :
                        : "memory");
                return;
        }
        case 4:
        {
                __asm__ __volatile__(
                        "lock; incl %0"
                        : "+m"(*__hp(addr))
                        :
                        : "memory");
                return;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                __asm__ __volatile__(
                        "lock; incq %0"
                        : "+m"(*__hp(addr))
                        :
                        : "memory");
                return;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return;
}

#define uatomic_inc(addr)       (_uatomic_inc((addr), sizeof(*(addr))))
/* uatomic_dec */

static inline __attribute__((always_inline))
void _uatomic_dec(void *addr, int len)
{
        switch (len) {
        case 1:
        {
                __asm__ __volatile__(
                        "lock; decb %0"
                        : "+m"(*__hp(addr))
                        :
                        : "memory");
                return;
        }
        case 2:
        {
                __asm__ __volatile__(
                        "lock; decw %0"
                        : "+m"(*__hp(addr))
                        :
                        : "memory");
                return;
        }
        case 4:
        {
                __asm__ __volatile__(
                        "lock; decl %0"
                        : "+m"(*__hp(addr))
                        :
                        : "memory");
                return;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                __asm__ __volatile__(
                        "lock; decq %0"
                        : "+m"(*__hp(addr))
                        :
                        : "memory");
                return;
        }
#endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
         * when optimizations are disabled. */
        __asm__ __volatile__("ud2");
        return;
}

#define uatomic_dec(addr)       (_uatomic_dec((addr), sizeof(*(addr))))
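/*
 * The cmpxchg instruction only exists on the i486 and later, so a 32-bit
 * build may end up running on a CPU that lacks it. URCU_CAS_AVAIL()
 * reports whether cmpxchg may be used: always true on 64-bit, and resolved
 * at run time on 32-bit through __urcu_cas_avail / __urcu_cas_init().
 * _compat_uatomic_cmpxchg() is the software fallback, declared here and
 * implemented elsewhere in the library.
 */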
#if (BITS_PER_LONG == 64)
#define URCU_CAS_AVAIL()        1
#define compat_uatomic_cmpxchg(ptr, old, _new)  uatomic_cmpxchg(ptr, old, _new)
#else
extern int __urcu_cas_avail;
extern int __urcu_cas_init(void);
#define URCU_CAS_AVAIL()                                                      \
        ((likely(__urcu_cas_avail > 0)) ?                                     \
                (1) :                                                         \
                ((unlikely(__urcu_cas_avail < 0) ?                            \
                        (__urcu_cas_init()) :                                 \
                        (0))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
                                             unsigned long _new, int len);

#define compat_uatomic_cmpxchg(addr, old, _new)                               \
        ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),                \
                                                (unsigned long)(old),         \
                                                (unsigned long)(_new),        \
                                                sizeof(*(addr))))
#endif
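/*
 * Illustrative only, not part of the original header: on 32-bit, code that
 * cannot assume an i486-class CPU could guard its cmpxchg usage like this
 * (variable names are hypothetical):
 *
 *      if (URCU_CAS_AVAIL())
 *              old = uatomic_cmpxchg(&var, expected, newval);
 *      else
 *              old = compat_uatomic_cmpxchg(&var, expected, newval);
 */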

#endif /* _URCU_ARCH_UATOMIC_X86_H */