#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/arch.h>
#include <urcu/config.h>
#include <urcu/compiler.h>
#include <urcu/system.h>

#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT
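/*
 * The two defines above tell the generic uatomic layer
 * (urcu/uatomic/generic.h, included at the end of this file) that 1-byte
 * and 2-byte atomic operations are natively available on x86.
 */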

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))
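/*
 * The dummy struct spans several longs so that "*__hp(addr)" gives the
 * inline asm below a memory operand wide enough to cover any of the
 * 1-, 2-, 4- or 8-byte accesses, whatever type addr actually points to.
 */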

#define _uatomic_set(addr, v)	((void) CMM_STORE_SHARED(*(addr), (v)))

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
				unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_cmpxchg(addr, old, _new)				\
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr),		\
					caa_cast_long_keep_sign(old),	\
					caa_cast_long_keep_sign(_new),	\
					sizeof(*(addr))))
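
/*
 * Usage sketch (illustrative only, not part of this header; the names
 * below are invented for the example): a typical compare-and-swap retry
 * loop built on uatomic_cmpxchg(), which returns the value found in
 * memory, so the update succeeded only when that value equals the
 * expected "old".
 *
 *	static unsigned long counter;
 *
 *	void add_if_below_limit(unsigned long limit)
 *	{
 *		unsigned long old, newval;
 *
 *		do {
 *			old = uatomic_read(&counter);
 *			if (old >= limit)
 *				return;
 *			newval = old + 1;
 *		} while (uatomic_cmpxchg(&counter, old, newval) != old);
 *	}
 */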

/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_xchg(addr, v)						\
	((__typeof__(*(addr))) __uatomic_exchange((addr),		\
					caa_cast_long_keep_sign(v),	\
					sizeof(*(addr))))
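
/*
 * Usage sketch (illustrative only; the names are invented for the
 * example): uatomic_xchg() atomically stores a new value and returns
 * the previous one, e.g. to consume a set of pending flags in one shot.
 * On x86, "xchg" with a memory operand is implicitly locked, hence no
 * "lock" prefix above.
 *
 *	static unsigned long pending_flags;
 *
 *	unsigned long consume_pending(void)
 *	{
 *		return uatomic_xchg(&pending_flags, 0UL);
 *	}
 */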

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				   int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) __uatomic_add_return((addr),		\
					caa_cast_long_keep_sign(v),	\
					sizeof(*(addr))))
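
/*
 * Usage sketch (illustrative only; names invented for the example):
 * uatomic_add_return() returns the value after the addition, so it can
 * hand out unique tickets. Internally, "lock; xadd" leaves the old
 * value in the register operand and the implementation adds "val" back
 * to produce the new value.
 *
 *	static unsigned long next_ticket;
 *
 *	unsigned long take_ticket(void)
 *	{
 *		return uatomic_add_return(&next_ticket, 1);
 *	}
 */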

/* uatomic_and */

static inline __attribute__((always_inline))
void __uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; andb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; andw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; andl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; andq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_and(addr, v)						\
	(__uatomic_and((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))

/* uatomic_or */

static inline __attribute__((always_inline))
void __uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; orb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; orw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; orl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; orq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_or(addr, v)						\
	(__uatomic_or((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
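
/*
 * Usage sketch (illustrative only; names invented for the example):
 * uatomic_and() and uatomic_or() clear and set bits in a shared flag
 * word. Neither returns the previous value; use uatomic_cmpxchg()
 * when the old flags are needed.
 *
 *	#define WORKER_STOP	0x1UL
 *	#define WORKER_BUSY	0x2UL
 *
 *	static unsigned long worker_flags;
 *
 *	void request_stop(void)
 *	{
 *		uatomic_or(&worker_flags, WORKER_STOP);
 *	}
 *
 *	void clear_busy(void)
 *	{
 *		uatomic_and(&worker_flags, ~WORKER_BUSY);
 *	}
 */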

/* uatomic_add */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_add(addr, v)						\
	(__uatomic_add((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))


/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))

/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))
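
/*
 * Usage sketch (illustrative only; names invented for the example):
 * uatomic_add(), uatomic_inc() and uatomic_dec() update a counter
 * without returning a value, which lets them use plain "lock;
 * add/inc/dec" instead of "lock; xadd". Use uatomic_add_return() when
 * the updated value is needed.
 *
 *	static unsigned long nr_active;
 *
 *	void enter(void)	{ uatomic_inc(&nr_active); }
 *	void leave(void)	{ uatomic_dec(&nr_active); }
 *	void batch(int n)	{ uatomic_add(&nr_active, n); }
 */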

#if ((CAA_BITS_PER_LONG != 64) && defined(URCU_ARCH_I386))

/* For backwards compat */
#define CONFIG_RCU_COMPAT_ARCH 1

extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

#define UATOMIC_COMPAT(insn)						\
	((caa_likely(__rcu_cas_avail > 0))				\
	? (_uatomic_##insn)						\
		: ((caa_unlikely(__rcu_cas_avail < 0)			\
			? ((__rcu_cas_init() > 0)			\
				? (_uatomic_##insn)			\
				: (compat_uatomic_##insn))		\
			: (compat_uatomic_##insn))))
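
/*
 * Dispatch performed by UATOMIC_COMPAT(), as read from the macro above:
 * __rcu_cas_avail > 0 means cmpxchg is known to be usable, so the native
 * _uatomic_* implementation is picked; __rcu_cas_avail < 0 means it has
 * not been probed yet, so __rcu_cas_init() is called first and its result
 * decides; otherwise (== 0, presumably a CPU without cmpxchg, such as an
 * original 80386) the compat_uatomic_* fallbacks are used.
 */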

/*
 * We leave the return value so we don't break the ABI, but remove the
 * return value from the API.
 */
extern unsigned long _compat_uatomic_set(void *addr,
					unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)					\
	((void) _compat_uatomic_set((addr),				\
				caa_cast_long_keep_sign(_new),		\
				sizeof(*(addr))))


extern unsigned long _compat_uatomic_xchg(void *addr,
					unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)					\
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		\
					caa_cast_long_keep_sign(_new),	\
					sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)				\
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		\
					caa_cast_long_keep_sign(old),	\
					caa_cast_long_keep_sign(_new),	\
					sizeof(*(addr))))

extern void _compat_uatomic_and(void *addr, unsigned long _new, int len);
#define compat_uatomic_and(addr, v)					\
	(_compat_uatomic_and((addr),					\
			caa_cast_long_keep_sign(v),			\
			sizeof(*(addr))))

extern void _compat_uatomic_or(void *addr, unsigned long _new, int len);
#define compat_uatomic_or(addr, v)					\
	(_compat_uatomic_or((addr),					\
			caa_cast_long_keep_sign(v),			\
			sizeof(*(addr))))

extern unsigned long _compat_uatomic_add_return(void *addr,
					unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v)				\
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	\
					caa_cast_long_keep_sign(v),	\
					sizeof(*(addr))))

#define compat_uatomic_add(addr, v)					\
	((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)					\
	(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)					\
	(compat_uatomic_add((addr), -1))

#else
#define UATOMIC_COMPAT(insn)	(_uatomic_##insn)
#endif

/*
 * Reads are atomic even in compat mode, so uatomic_read() is used as-is
 * from the generic layer; stores and read-modify-write operations go
 * through UATOMIC_COMPAT().
 */
#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))

#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))

#define uatomic_and(addr, v)			\
		UATOMIC_COMPAT(and(addr, v))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#define uatomic_or(addr, v)			\
		UATOMIC_COMPAT(or(addr, v))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()

#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define cmm_smp_mb__before_uatomic_inc()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_inc()		cmm_barrier()

#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))
#define cmm_smp_mb__before_uatomic_dec()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_dec()		cmm_barrier()
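
/*
 * On x86, lock-prefixed (and xchg) instructions already provide full
 * memory ordering, so the cmm_smp_mb__before/after_uatomic_*() helpers
 * above only need to be compiler barriers (cmm_barrier()) rather than
 * full cmm_smp_mb() barriers.
 */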

#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */