urcu.git: include/urcu/uatomic/x86.h (commit 616eee9be3ca60b4be62c8a10cadb0aff2d1ea8d)
1 // SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
2 // SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
3 // SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
4 // SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 //
6 // SPDX-License-Identifier: LicenseRef-Boehm-GC
7
8 #ifndef _URCU_ARCH_UATOMIC_X86_H
9 #define _URCU_ARCH_UATOMIC_X86_H
10
11 #include <stdlib.h> /* For abort(3). */
12
13 /*
14 * Code inspired from libuatomic_ops-1.2, inherited in part from the
15 * Boehm-Demers-Weiser conservative garbage collector.
16 */
17
18 #include <urcu/arch.h>
19 #include <urcu/config.h>
20 #include <urcu/compiler.h>
21 #include <urcu/system.h>
22
23 #define UATOMIC_HAS_ATOMIC_BYTE
24 #define UATOMIC_HAS_ATOMIC_SHORT
25
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29
30 /*
31 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
32 */
33
34 /*
35 * The __hp() macro casts the void pointer @x to a pointer to a structure
36 * containing an array of char of the specified size. This allows passing the
37 * @addr arguments of the following inline functions as "m" and "+m" operands
38 * to the assembly. The @size parameter should be a constant to support
39 * compilers such as clang which do not support VLA. Create typedefs because
40 * C++ does not allow types to be defined in casts.
41 */
42
43 typedef struct { char v[1]; } __hp_1;
44 typedef struct { char v[2]; } __hp_2;
45 typedef struct { char v[4]; } __hp_4;
46 typedef struct { char v[8]; } __hp_8;
47
48 #define __hp(size, x) ((__hp_##size *)(x))
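/*
 * Illustrative expansion (editorial note, not part of the original file):
 * for a 4-byte object, the operand "+m"(*__hp(4, addr)) is equivalent to
 *
 *      "+m"(*(__hp_4 *)(addr))
 *
 * which tells the compiler that the inline assembly may read and write
 * exactly 4 bytes at @addr, no more and no less.
 */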
49
50 #define _uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
51
52 /* cmpxchg */
53
54 static inline __attribute__((always_inline))
55 unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
56 unsigned long _new, int len)
57 {
58 switch (len) {
59 case 1:
60 {
61 unsigned char result = old;
62
63 __asm__ __volatile__(
64 "lock; cmpxchgb %2, %1"
65 : "+a"(result), "+m"(*__hp(1, addr))
66 : "q"((unsigned char)_new)
67 : "memory");
68 return result;
69 }
70 case 2:
71 {
72 unsigned short result = old;
73
74 __asm__ __volatile__(
75 "lock; cmpxchgw %2, %1"
76 : "+a"(result), "+m"(*__hp(2, addr))
77 : "r"((unsigned short)_new)
78 : "memory");
79 return result;
80 }
81 case 4:
82 {
83 unsigned int result = old;
84
85 __asm__ __volatile__(
86 "lock; cmpxchgl %2, %1"
87 : "+a"(result), "+m"(*__hp(4, addr))
88 : "r"((unsigned int)_new)
89 : "memory");
90 return result;
91 }
92 #if (CAA_BITS_PER_LONG == 64)
93 case 8:
94 {
95 unsigned long result = old;
96
97 __asm__ __volatile__(
98 "lock; cmpxchgq %2, %1"
99 : "+a"(result), "+m"(*__hp(8, addr))
100 : "r"((unsigned long)_new)
101 : "memory");
102 return result;
103 }
104 #endif
105 }
106 /*
107 * generate an illegal instruction. Cannot catch this with
108 * linker tricks when optimizations are disabled.
109 */
110 __asm__ __volatile__("ud2");
111 return 0;
112 }
113
114 #define _uatomic_cmpxchg(addr, old, _new) \
115 ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), \
116 caa_cast_long_keep_sign(old), \
117 caa_cast_long_keep_sign(_new),\
118 sizeof(*(addr))))
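/*
 * Usage sketch (illustrative; "count" is a hypothetical variable). The
 * public uatomic_cmpxchg() wrapper defined later in this header returns
 * the value found at the address, cast back to the type of *addr:
 *
 *      unsigned long count = 0;
 *      unsigned long old;
 *
 *      old = uatomic_cmpxchg(&count, 0UL, 1UL);
 *      // old == 0 means the swap happened and count is now 1;
 *      // any other value is the current content of count, left unchanged.
 */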
119
120 /* xchg */
121
122 static inline __attribute__((always_inline))
123 unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
124 {
125 /* Note: the "xchg" instruction does not need a "lock" prefix. */
126 switch (len) {
127 case 1:
128 {
129 unsigned char result;
130 __asm__ __volatile__(
131 "xchgb %0, %1"
132 : "=q"(result), "+m"(*__hp(1, addr))
133 : "0" ((unsigned char)val)
134 : "memory");
135 return result;
136 }
137 case 2:
138 {
139 unsigned short result;
140 __asm__ __volatile__(
141 "xchgw %0, %1"
142 : "=r"(result), "+m"(*__hp(2, addr))
143 : "0" ((unsigned short)val)
144 : "memory");
145 return result;
146 }
147 case 4:
148 {
149 unsigned int result;
150 __asm__ __volatile__(
151 "xchgl %0, %1"
152 : "=r"(result), "+m"(*__hp(4, addr))
153 : "0" ((unsigned int)val)
154 : "memory");
155 return result;
156 }
157 #if (CAA_BITS_PER_LONG == 64)
158 case 8:
159 {
160 unsigned long result;
161 __asm__ __volatile__(
162 "xchgq %0, %1"
163 : "=r"(result), "+m"(*__hp(8, addr))
164 : "0" ((unsigned long)val)
165 : "memory");
166 return result;
167 }
168 #endif
169 }
170 /*
171 * generate an illegal instruction. Cannot catch this with
172 * linker tricks when optimizations are disabled.
173 */
174 __asm__ __volatile__("ud2");
175 return 0;
176 }
177
178 #define _uatomic_xchg(addr, v) \
179 ((__typeof__(*(addr))) __uatomic_exchange((addr), \
180 caa_cast_long_keep_sign(v), \
181 sizeof(*(addr))))
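/*
 * Illustrative sketch (not part of this header): a minimal test-and-set
 * lock built on the public uatomic_xchg() and uatomic_set() wrappers
 * defined below, plus caa_cpu_relax() and cmm_smp_mb() from the headers
 * included above. The "lock_word" variable is hypothetical.
 *
 *      static int lock_word;
 *
 *      static void lock(void)
 *      {
 *              while (uatomic_xchg(&lock_word, 1) != 0)
 *                      caa_cpu_relax();        // spin until we observe 0
 *      }
 *
 *      static void unlock(void)
 *      {
 *              cmm_smp_mb();                   // order critical section before release
 *              uatomic_set(&lock_word, 0);
 *      }
 */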
182
183 /* uatomic_add_return */
184
185 static inline __attribute__((always_inline))
186 unsigned long __uatomic_add_return(void *addr, unsigned long val,
187 int len)
188 {
189 switch (len) {
190 case 1:
191 {
192 unsigned char result = val;
193
194 __asm__ __volatile__(
195 "lock; xaddb %1, %0"
196 : "+m"(*__hp(1, addr)), "+q" (result)
197 :
198 : "memory");
199 return result + (unsigned char)val;
200 }
201 case 2:
202 {
203 unsigned short result = val;
204
205 __asm__ __volatile__(
206 "lock; xaddw %1, %0"
207 : "+m"(*__hp(2, addr)), "+r" (result)
208 :
209 : "memory");
210 return result + (unsigned short)val;
211 }
212 case 4:
213 {
214 unsigned int result = val;
215
216 __asm__ __volatile__(
217 "lock; xaddl %1, %0"
218 : "+m"(*__hp(4, addr)), "+r" (result)
219 :
220 : "memory");
221 return result + (unsigned int)val;
222 }
223 #if (CAA_BITS_PER_LONG == 64)
224 case 8:
225 {
226 unsigned long result = val;
227
228 __asm__ __volatile__(
229 "lock; xaddq %1, %0"
230 : "+m"(*__hp(8, addr)), "+r" (result)
231 :
232 : "memory");
233 return result + (unsigned long)val;
234 }
235 #endif
236 }
237 /*
238 * generate an illegal instruction. Cannot catch this with
239 * linker tricks when optimizations are disabled.
240 */
241 __asm__ __volatile__("ud2");
242 return 0;
243 }
244
245 #define _uatomic_add_return(addr, v) \
246 ((__typeof__(*(addr))) __uatomic_add_return((addr), \
247 caa_cast_long_keep_sign(v), \
248 sizeof(*(addr))))
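/*
 * Note (illustrative): "xadd" leaves the *previous* value of the memory
 * location in the source register, so the "result + val" above yields the
 * value after the addition, which is what uatomic_add_return() promises.
 * For example, with a counter currently holding 5,
 *
 *      uatomic_add_return(&counter, 2);        // stores 7, returns 7
 *
 * ("counter" is a hypothetical variable.)
 */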
249
250 /* uatomic_and */
251
252 static inline __attribute__((always_inline))
253 void __uatomic_and(void *addr, unsigned long val, int len)
254 {
255 switch (len) {
256 case 1:
257 {
258 __asm__ __volatile__(
259 "lock; andb %1, %0"
260 : "=m"(*__hp(1, addr))
261 : "iq" ((unsigned char)val)
262 : "memory");
263 return;
264 }
265 case 2:
266 {
267 __asm__ __volatile__(
268 "lock; andw %1, %0"
269 : "=m"(*__hp(2, addr))
270 : "ir" ((unsigned short)val)
271 : "memory");
272 return;
273 }
274 case 4:
275 {
276 __asm__ __volatile__(
277 "lock; andl %1, %0"
278 : "=m"(*__hp(4, addr))
279 : "ir" ((unsigned int)val)
280 : "memory");
281 return;
282 }
283 #if (CAA_BITS_PER_LONG == 64)
284 case 8:
285 {
286 __asm__ __volatile__(
287 "lock; andq %1, %0"
288 : "=m"(*__hp(8, addr))
289 : "er" ((unsigned long)val)
290 : "memory");
291 return;
292 }
293 #endif
294 }
295 /*
296 * generate an illegal instruction. Cannot catch this with
297 * linker tricks when optimizations are disabled.
298 */
299 __asm__ __volatile__("ud2");
300 return;
301 }
302
303 #define _uatomic_and(addr, v) \
304 (__uatomic_and((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
305
306 /* uatomic_or */
307
308 static inline __attribute__((always_inline))
309 void __uatomic_or(void *addr, unsigned long val, int len)
310 {
311 switch (len) {
312 case 1:
313 {
314 __asm__ __volatile__(
315 "lock; orb %1, %0"
316 : "=m"(*__hp(1, addr))
317 : "iq" ((unsigned char)val)
318 : "memory");
319 return;
320 }
321 case 2:
322 {
323 __asm__ __volatile__(
324 "lock; orw %1, %0"
325 : "=m"(*__hp(2, addr))
326 : "ir" ((unsigned short)val)
327 : "memory");
328 return;
329 }
330 case 4:
331 {
332 __asm__ __volatile__(
333 "lock; orl %1, %0"
334 : "=m"(*__hp(4, addr))
335 : "ir" ((unsigned int)val)
336 : "memory");
337 return;
338 }
339 #if (CAA_BITS_PER_LONG == 64)
340 case 8:
341 {
342 __asm__ __volatile__(
343 "lock; orq %1, %0"
344 : "=m"(*__hp(8, addr))
345 : "er" ((unsigned long)val)
346 : "memory");
347 return;
348 }
349 #endif
350 }
351 /*
352 * generate an illegal instruction. Cannot catch this with
353 * linker tricks when optimizations are disabled.
354 */
355 __asm__ __volatile__("ud2");
356 return;
357 }
358
359 #define _uatomic_or(addr, v) \
360 (__uatomic_or((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
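/*
 * Illustrative bit manipulation ("flags" is a hypothetical variable),
 * using the public wrappers defined later in this header:
 *
 *      unsigned int flags = 0;
 *
 *      uatomic_or(&flags, 0x4);        // atomically set bit 2
 *      uatomic_and(&flags, ~0x4U);     // atomically clear bit 2
 *
 * Neither operation returns the previous value; when the old value is
 * needed, use uatomic_cmpxchg() in a retry loop instead.
 */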
361
362 /* uatomic_add */
363
364 static inline __attribute__((always_inline))
365 void __uatomic_add(void *addr, unsigned long val, int len)
366 {
367 switch (len) {
368 case 1:
369 {
370 __asm__ __volatile__(
371 "lock; addb %1, %0"
372 : "=m"(*__hp(1, addr))
373 : "iq" ((unsigned char)val)
374 : "memory");
375 return;
376 }
377 case 2:
378 {
379 __asm__ __volatile__(
380 "lock; addw %1, %0"
381 : "=m"(*__hp(2, addr))
382 : "ir" ((unsigned short)val)
383 : "memory");
384 return;
385 }
386 case 4:
387 {
388 __asm__ __volatile__(
389 "lock; addl %1, %0"
390 : "=m"(*__hp(4, addr))
391 : "ir" ((unsigned int)val)
392 : "memory");
393 return;
394 }
395 #if (CAA_BITS_PER_LONG == 64)
396 case 8:
397 {
398 __asm__ __volatile__(
399 "lock; addq %1, %0"
400 : "=m"(*__hp(8, addr))
401 : "er" ((unsigned long)val)
402 : "memory");
403 return;
404 }
405 #endif
406 }
407 /*
408 * generate an illegal instruction. Cannot catch this with
409 * linker tricks when optimizations are disabled.
410 */
411 __asm__ __volatile__("ud2");
412 return;
413 }
414
415 #define _uatomic_add(addr, v) \
416 (__uatomic_add((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
417
418
419 /* uatomic_inc */
420
421 static inline __attribute__((always_inline))
422 void __uatomic_inc(void *addr, int len)
423 {
424 switch (len) {
425 case 1:
426 {
427 __asm__ __volatile__(
428 "lock; incb %0"
429 : "=m"(*__hp(1, addr))
430 :
431 : "memory");
432 return;
433 }
434 case 2:
435 {
436 __asm__ __volatile__(
437 "lock; incw %0"
438 : "=m"(*__hp(2, addr))
439 :
440 : "memory");
441 return;
442 }
443 case 4:
444 {
445 __asm__ __volatile__(
446 "lock; incl %0"
447 : "=m"(*__hp(4, addr))
448 :
449 : "memory");
450 return;
451 }
452 #if (CAA_BITS_PER_LONG == 64)
453 case 8:
454 {
455 __asm__ __volatile__(
456 "lock; incq %0"
457 : "=m"(*__hp(8, addr))
458 :
459 : "memory");
460 return;
461 }
462 #endif
463 }
464 /* generate an illegal instruction. Cannot catch this with linker tricks
465 * when optimizations are disabled. */
466 __asm__ __volatile__("ud2");
467 return;
468 }
469
470 #define _uatomic_inc(addr) (__uatomic_inc((addr), sizeof(*(addr))))
471
472 /* uatomic_dec */
473
474 static inline __attribute__((always_inline))
475 void __uatomic_dec(void *addr, int len)
476 {
477 switch (len) {
478 case 1:
479 {
480 __asm__ __volatile__(
481 "lock; decb %0"
482 : "=m"(*__hp(1, addr))
483 :
484 : "memory");
485 return;
486 }
487 case 2:
488 {
489 __asm__ __volatile__(
490 "lock; decw %0"
491 : "=m"(*__hp(2, addr))
492 :
493 : "memory");
494 return;
495 }
496 case 4:
497 {
498 __asm__ __volatile__(
499 "lock; decl %0"
500 : "=m"(*__hp(4, addr))
501 :
502 : "memory");
503 return;
504 }
505 #if (CAA_BITS_PER_LONG == 64)
506 case 8:
507 {
508 __asm__ __volatile__(
509 "lock; decq %0"
510 : "=m"(*__hp(8, addr))
511 :
512 : "memory");
513 return;
514 }
515 #endif
516 }
517 /*
518 * generate an illegal instruction. Cannot catch this with
519 * linker tricks when optimizations are disabled.
520 */
521 __asm__ __volatile__("ud2");
522 return;
523 }
524
525 #define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
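/*
 * Illustrative reference-count sketch ("obj", "refcount" and
 * "free_object" are hypothetical): uatomic_inc()/uatomic_dec() only
 * perform the atomic update and return nothing, so detecting the
 * 1 -> 0 transition requires uatomic_add_return() instead:
 *
 *      uatomic_inc(&obj->refcount);            // take a reference
 *
 *      if (uatomic_add_return(&obj->refcount, -1) == 0)
 *              free_object(obj);               // dropped the last reference
 */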
526
527 #ifdef URCU_ARCH_X86_NO_CAS
528
529 /* For backwards compat */
530 #define CONFIG_RCU_COMPAT_ARCH 1
531
532 extern int __rcu_cas_avail;
533 extern int __rcu_cas_init(void);
534
535 #define UATOMIC_COMPAT(insn) \
536 ((caa_likely(__rcu_cas_avail > 0)) \
537 ? (_uatomic_##insn) \
538 : ((caa_unlikely(__rcu_cas_avail < 0) \
539 ? ((__rcu_cas_init() > 0) \
540 ? (_uatomic_##insn) \
541 : (compat_uatomic_##insn)) \
542 : (compat_uatomic_##insn))))
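/*
 * Dispatch sketch (editorial reading of the macro above): __rcu_cas_avail
 * presumably starts out negative, meaning "not probed yet". The first
 * UATOMIC_COMPAT() call then invokes __rcu_cas_init() once to detect
 * whether the CPU supports cmpxchg; afterwards the flag is either > 0
 * (use the native _uatomic_* implementation above) or 0 (fall back to the
 * library-provided compat_uatomic_* routines declared below).
 */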
543
544 /*
545 * We leave the return value so we don't break the ABI, but remove the
546 * return value from the API.
547 */
548 extern unsigned long _compat_uatomic_set(void *addr,
549 unsigned long _new, int len);
550 #define compat_uatomic_set(addr, _new) \
551 ((void) _compat_uatomic_set((addr), \
552 caa_cast_long_keep_sign(_new), \
553 sizeof(*(addr))))
554
555
556 extern unsigned long _compat_uatomic_xchg(void *addr,
557 unsigned long _new, int len);
558 #define compat_uatomic_xchg(addr, _new) \
559 ((__typeof__(*(addr))) _compat_uatomic_xchg((addr), \
560 caa_cast_long_keep_sign(_new), \
561 sizeof(*(addr))))
562
563 extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
564 unsigned long _new, int len);
565 #define compat_uatomic_cmpxchg(addr, old, _new) \
566 ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr), \
567 caa_cast_long_keep_sign(old), \
568 caa_cast_long_keep_sign(_new), \
569 sizeof(*(addr))))
570
571 extern void _compat_uatomic_and(void *addr, unsigned long _new, int len);
572 #define compat_uatomic_and(addr, v) \
573 (_compat_uatomic_and((addr), \
574 caa_cast_long_keep_sign(v), \
575 sizeof(*(addr))))
576
577 extern void _compat_uatomic_or(void *addr, unsigned long _new, int len);
578 #define compat_uatomic_or(addr, v) \
579 (_compat_uatomic_or((addr), \
580 caa_cast_long_keep_sign(v), \
581 sizeof(*(addr))))
582
583 extern unsigned long _compat_uatomic_add_return(void *addr,
584 unsigned long _new, int len);
585 #define compat_uatomic_add_return(addr, v) \
586 ((__typeof__(*(addr))) _compat_uatomic_add_return((addr), \
587 caa_cast_long_keep_sign(v), \
588 sizeof(*(addr))))
589
590 #define compat_uatomic_add(addr, v) \
591 ((void)compat_uatomic_add_return((addr), (v)))
592 #define compat_uatomic_inc(addr) \
593 (compat_uatomic_add((addr), 1))
594 #define compat_uatomic_dec(addr) \
595 (compat_uatomic_add((addr), -1))
596
597 #else
598 #define UATOMIC_COMPAT(insn) (_uatomic_##insn)
599 #endif
600
601 /* Read is atomic even in compat mode, so uatomic_read() needs no compat wrapper. */
602 #define uatomic_set(addr, v) \
603 UATOMIC_COMPAT(set(addr, v))
604
605 #define uatomic_cmpxchg(addr, old, _new) \
606 UATOMIC_COMPAT(cmpxchg(addr, old, _new))
607 #define uatomic_xchg(addr, v) \
608 UATOMIC_COMPAT(xchg(addr, v))
609
610 #define uatomic_and(addr, v) \
611 UATOMIC_COMPAT(and(addr, v))
612 #define cmm_smp_mb__before_uatomic_and() cmm_barrier()
613 #define cmm_smp_mb__after_uatomic_and() cmm_barrier()
614
615 #define uatomic_or(addr, v) \
616 UATOMIC_COMPAT(or(addr, v))
617 #define cmm_smp_mb__before_uatomic_or() cmm_barrier()
618 #define cmm_smp_mb__after_uatomic_or() cmm_barrier()
619
620 #define uatomic_add_return(addr, v) \
621 UATOMIC_COMPAT(add_return(addr, v))
622
623 #define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v))
624 #define cmm_smp_mb__before_uatomic_add() cmm_barrier()
625 #define cmm_smp_mb__after_uatomic_add() cmm_barrier()
626
627 #define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr))
628 #define cmm_smp_mb__before_uatomic_inc() cmm_barrier()
629 #define cmm_smp_mb__after_uatomic_inc() cmm_barrier()
630
631 #define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr))
632 #define cmm_smp_mb__before_uatomic_dec() cmm_barrier()
633 #define cmm_smp_mb__after_uatomic_dec() cmm_barrier()
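/*
 * Illustrative pairing ("a" and "b" are hypothetical variables): the
 * cmm_smp_mb__before/after_uatomic_*() macros above expand to a compiler
 * barrier only, because the lock-prefixed instructions already provide
 * the required memory ordering on x86:
 *
 *      uatomic_set(&a, 1);
 *      cmm_smp_mb__before_uatomic_inc();       // compiler barrier on x86
 *      uatomic_inc(&b);                        // locked, acts as a full fence
 *      cmm_smp_mb__after_uatomic_inc();        // compiler barrier on x86
 */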
634
635 static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
636 {
637 /*
638 * An SMP barrier is not necessary for CMM_SEQ_CST because only a
639 * previous store can be reordered with the load. However, emitting the
640 * memory barrier after the store is sufficient to prevent reordering
641 * between the two. This follows the toolchains' decision of emitting the
642 * memory fence on the stores instead of the loads.
643 *
644 * A compiler barrier is necessary because the underlying operation is a
645 * plain access that does not prevent compiler reordering on its own.
646 */
647 switch (mo) {
648 case CMM_RELAXED: /* Fall-through */
649 case CMM_ACQUIRE: /* Fall-through */
650 case CMM_CONSUME: /* Fall-through */
651 case CMM_SEQ_CST: /* Fall-through */
652 case CMM_SEQ_CST_FENCE:
653 cmm_barrier();
654 break;
655 case CMM_ACQ_REL: /* Fall-through */
656 case CMM_RELEASE: /* Fall-through */
657 default:
658 abort();
659 break;
660 }
661 }
662
663 static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
664 {
665 /*
666 * An SMP barrier is not necessary for CMM_SEQ_CST because following
667 * loads and stores cannot be reordered with the load.
668 *
669 * An SMP barrier is, however, necessary for CMM_SEQ_CST_FENCE to respect
670 * the memory model, since the underlying operation does not have a lock
671 * prefix.
672 *
673 * A compiler barrier is necessary because the underlying operation is a
674 * plain access that does not prevent compiler reordering on its own.
675 */
676 switch (mo) {
677 case CMM_SEQ_CST_FENCE:
678 cmm_smp_mb();
679 break;
680 case CMM_RELAXED: /* Fall-through */
681 case CMM_ACQUIRE: /* Fall-through */
682 case CMM_CONSUME: /* Fall-through */
683 case CMM_SEQ_CST:
684 cmm_barrier();
685 break;
686 case CMM_ACQ_REL: /* Fall-through */
687 case CMM_RELEASE: /* Fall-through */
688 default:
689 abort();
690 break;
691 }
692 }
693
694 static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo)
695 {
696 /*
697 * An SMP barrier is not necessary for CMM_SEQ_CST because the store can
698 * only be reordered with later loads.
699 *
700 * A compiler barrier is necessary because the underlying operation is a
701 * plain access that does not prevent compiler reordering on its own.
702 */
703 switch (mo) {
704 case CMM_RELAXED: /* Fall-through */
705 case CMM_RELEASE: /* Fall-through */
706 case CMM_SEQ_CST: /* Fall-through */
707 case CMM_SEQ_CST_FENCE:
708 cmm_barrier();
709 break;
710 case CMM_ACQ_REL: /* Fall-through */
711 case CMM_ACQUIRE: /* Fall-through */
712 case CMM_CONSUME: /* Fall-through */
713 default:
714 abort();
715 break;
716 }
717 }
718
719 static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo)
720 {
721 /*
722 * An SMP barrier is necessary for CMM_SEQ_CST because the store can be
723 * reordered with later loads. Since no memory barrier is being emitted
724 * before loads, one has to be emitted after the store. This follows
725 * the toolchains' decision of emitting the memory fence on the stores instead
726 * of the loads.
727 *
728 * An SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the
729 * memory model, since the underlying store does not have a lock prefix.
730 *
731 * A compiler barrier is necessary because the underlying operation is a
732 * plain access that does not prevent compiler reordering on its own.
733 */
734 switch (mo) {
735 case CMM_SEQ_CST: /* Fall-through */
736 case CMM_SEQ_CST_FENCE:
737 cmm_smp_mb();
738 break;
739 case CMM_RELAXED: /* Fall-through */
740 case CMM_RELEASE:
741 cmm_barrier();
742 break;
743 case CMM_ACQ_REL: /* Fall-through */
744 case CMM_ACQUIRE: /* Fall-through */
745 case CMM_CONSUME: /* Fall-through */
746 default:
747 abort();
748 break;
749 }
750 }
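/*
 * Worked example (illustrative): with this scheme, a sequentially
 * consistent store becomes a plain store followed by a full fence,
 *
 *      CMM_STORE_SHARED(x, 1);                 // plain store (x is hypothetical)
 *      cmm_smp_mb();                           // from ..._after_uatomic_set_mo(CMM_SEQ_CST)
 *
 * mirroring the usual x86 mapping that fences seq_cst stores rather than
 * seq_cst loads.
 */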
751
752 static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo)
753 {
754 /* NOP. uatomic_xchg has implicit lock prefix. */
755 switch (mo) {
756 case CMM_RELAXED: /* Fall-through */
757 case CMM_ACQUIRE: /* Fall-through */
758 case CMM_CONSUME: /* Fall-through */
759 case CMM_RELEASE: /* Fall-through */
760 case CMM_ACQ_REL: /* Fall-through */
761 case CMM_SEQ_CST: /* Fall-through */
762 case CMM_SEQ_CST_FENCE:
763 break;
764 default:
765 abort();
766 }
767 }
768
769 static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo)
770 {
771 /* NOP. uatomic_xchg has implicit lock prefix. */
772 switch (mo) {
773 case CMM_RELAXED: /* Fall-through */
774 case CMM_ACQUIRE: /* Fall-through */
775 case CMM_CONSUME: /* Fall-through */
776 case CMM_RELEASE: /* Fall-through */
777 case CMM_ACQ_REL: /* Fall-through */
778 case CMM_SEQ_CST: /* Fall-through */
779 case CMM_SEQ_CST_FENCE:
780 break;
781 default:
782 abort();
783 }
784 }
785
786 static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo)
787 {
788 /* NOP. uatomic_cmpxchg has implicit lock prefix. */
789 switch (mo) {
790 case CMM_RELAXED: /* Fall-through */
791 case CMM_ACQUIRE: /* Fall-through */
792 case CMM_CONSUME: /* Fall-through */
793 case CMM_RELEASE: /* Fall-through */
794 case CMM_ACQ_REL: /* Fall-through */
795 case CMM_SEQ_CST: /* Fall-through */
796 case CMM_SEQ_CST_FENCE:
797 break;
798 default:
799 abort();
800 }
801 }
802
803 static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo)
804 {
805 /* NOP. uatomic_cmpxchg has implicit lock prefix. */
806 switch (mo) {
807 case CMM_RELAXED: /* Fall-through */
808 case CMM_ACQUIRE: /* Fall-through */
809 case CMM_CONSUME: /* Fall-through */
810 case CMM_RELEASE: /* Fall-through */
811 case CMM_ACQ_REL: /* Fall-through */
812 case CMM_SEQ_CST: /* Fall-through */
813 case CMM_SEQ_CST_FENCE:
814 break;
815 default:
816 abort();
817 }
818 }
819
820 static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
821 {
822 /* NOP. uatomic_and has explicit lock prefix. */
823 switch (mo) {
824 case CMM_RELAXED: /* Fall-through */
825 case CMM_ACQUIRE: /* Fall-through */
826 case CMM_CONSUME: /* Fall-through */
827 case CMM_RELEASE: /* Fall-through */
828 case CMM_ACQ_REL: /* Fall-through */
829 case CMM_SEQ_CST: /* Fall-through */
830 case CMM_SEQ_CST_FENCE:
831 break;
832 default:
833 abort();
834 }
835 }
836
837 static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
838 {
839 /* NOP. uatomic_and has explicit lock prefix. */
840 switch (mo) {
841 case CMM_RELAXED: /* Fall-through */
842 case CMM_ACQUIRE: /* Fall-through */
843 case CMM_CONSUME: /* Fall-through */
844 case CMM_RELEASE: /* Fall-through */
845 case CMM_ACQ_REL: /* Fall-through */
846 case CMM_SEQ_CST: /* Fall-through */
847 case CMM_SEQ_CST_FENCE:
848 break;
849 default:
850 abort();
851 }
852 }
853
854 static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
855 {
856 /* NOP. uatomic_or has explicit lock prefix. */
857 switch (mo) {
858 case CMM_RELAXED: /* Fall-through */
859 case CMM_ACQUIRE: /* Fall-through */
860 case CMM_CONSUME: /* Fall-through */
861 case CMM_RELEASE: /* Fall-through */
862 case CMM_ACQ_REL: /* Fall-through */
863 case CMM_SEQ_CST: /* Fall-through */
864 case CMM_SEQ_CST_FENCE:
865 break;
866 default:
867 abort();
868 }
869 }
870
871 static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
872 {
873 /* NOP. uatomic_or has explicit lock prefix. */
874 switch (mo) {
875 case CMM_RELAXED: /* Fall-through */
876 case CMM_ACQUIRE: /* Fall-through */
877 case CMM_CONSUME: /* Fall-through */
878 case CMM_RELEASE: /* Fall-through */
879 case CMM_ACQ_REL: /* Fall-through */
880 case CMM_SEQ_CST: /* Fall-through */
881 case CMM_SEQ_CST_FENCE:
882 break;
883 default:
884 abort();
885 }
886 }
887
888 static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
889 {
890 /* NOP. uatomic_add has explicit lock prefix. */
891 switch (mo) {
892 case CMM_RELAXED: /* Fall-through */
893 case CMM_ACQUIRE: /* Fall-through */
894 case CMM_CONSUME: /* Fall-through */
895 case CMM_RELEASE: /* Fall-through */
896 case CMM_ACQ_REL: /* Fall-through */
897 case CMM_SEQ_CST: /* Fall-through */
898 case CMM_SEQ_CST_FENCE:
899 break;
900 default:
901 abort();
902 }
903 }
904
905 static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
906 {
907 /* NOP. uatomic_add has explicit lock prefix. */
908 switch (mo) {
909 case CMM_RELAXED: /* Fall-through */
910 case CMM_ACQUIRE: /* Fall-through */
911 case CMM_CONSUME: /* Fall-through */
912 case CMM_RELEASE: /* Fall-through */
913 case CMM_ACQ_REL: /* Fall-through */
914 case CMM_SEQ_CST: /* Fall-through */
915 case CMM_SEQ_CST_FENCE:
916 break;
917 default:
918 abort();
919 }
920 }
921
922 static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
923 {
924 /* NOP. uatomic_sub has explicit lock prefix. */
925 switch (mo) {
926 case CMM_RELAXED: /* Fall-through */
927 case CMM_ACQUIRE: /* Fall-through */
928 case CMM_CONSUME: /* Fall-through */
929 case CMM_RELEASE: /* Fall-through */
930 case CMM_ACQ_REL: /* Fall-through */
931 case CMM_SEQ_CST: /* Fall-through */
932 case CMM_SEQ_CST_FENCE:
933 break;
934 default:
935 abort();
936 }
937 }
938
939 static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
940 {
941 /* NOP. uatomic_sub has explicit lock prefix. */
942 switch (mo) {
943 case CMM_RELAXED: /* Fall-through */
944 case CMM_ACQUIRE: /* Fall-through */
945 case CMM_CONSUME: /* Fall-through */
946 case CMM_RELEASE: /* Fall-through */
947 case CMM_ACQ_REL: /* Fall-through */
948 case CMM_SEQ_CST: /* Fall-through */
949 case CMM_SEQ_CST_FENCE:
950 break;
951 default:
952 abort();
953 }
954 }
955
956 static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
957 {
958 /* NOP. uatomic_inc has explicit lock prefix. */
959 switch (mo) {
960 case CMM_RELAXED: /* Fall-through */
961 case CMM_ACQUIRE: /* Fall-through */
962 case CMM_CONSUME: /* Fall-through */
963 case CMM_RELEASE: /* Fall-through */
964 case CMM_ACQ_REL: /* Fall-through */
965 case CMM_SEQ_CST: /* Fall-through */
966 case CMM_SEQ_CST_FENCE:
967 break;
968 default:
969 abort();
970 }
971 }
972
973 static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
974 {
975 /* NOP. uatomic_inc has explicit lock prefix. */
976 switch (mo) {
977 case CMM_RELAXED: /* Fall-through */
978 case CMM_ACQUIRE: /* Fall-through */
979 case CMM_CONSUME: /* Fall-through */
980 case CMM_RELEASE: /* Fall-through */
981 case CMM_ACQ_REL: /* Fall-through */
982 case CMM_SEQ_CST: /* Fall-through */
983 case CMM_SEQ_CST_FENCE:
984 break;
985 default:
986 abort();
987 }
988 }
989
990 static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
991 {
992 /* NOP. uatomic_dec has explicit lock prefix. */
993 switch (mo) {
994 case CMM_RELAXED: /* Fall-through */
995 case CMM_ACQUIRE: /* Fall-through */
996 case CMM_CONSUME: /* Fall-through */
997 case CMM_RELEASE: /* Fall-through */
998 case CMM_ACQ_REL: /* Fall-through */
999 case CMM_SEQ_CST: /* Fall-through */
1000 case CMM_SEQ_CST_FENCE:
1001 break;
1002 default:
1003 abort();
1004 }
1005 }
1006
1007 static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
1008 {
1009 /* NOP. uatomic_dec has explicit lock prefix. */
1010 switch (mo) {
1011 case CMM_RELAXED: /* Fall-through */
1012 case CMM_ACQUIRE: /* Fall-through */
1013 case CMM_CONSUME: /* Fall-through */
1014 case CMM_RELEASE: /* Fall-through */
1015 case CMM_ACQ_REL: /* Fall-through */
1016 case CMM_SEQ_CST: /* Fall-through */
1017 case CMM_SEQ_CST_FENCE:
1018 break;
1019 default:
1020 abort();
1021 }
1022 }
1023
1024 static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
1025 {
1026 /* NOP. uatomic_add_return has explicit lock prefix. */
1027 switch (mo) {
1028 case CMM_RELAXED: /* Fall-through */
1029 case CMM_ACQUIRE: /* Fall-through */
1030 case CMM_CONSUME: /* Fall-through */
1031 case CMM_RELEASE: /* Fall-through */
1032 case CMM_ACQ_REL: /* Fall-through */
1033 case CMM_SEQ_CST: /* Fall-through */
1034 case CMM_SEQ_CST_FENCE:
1035 break;
1036 default:
1037 abort();
1038 }
1039 }
1040
1041 static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
1042 {
1043 /* NOP. uatomic_add_return has explicit lock prefix. */
1044 switch (mo) {
1045 case CMM_RELAXED: /* Fall-through */
1046 case CMM_ACQUIRE: /* Fall-through */
1047 case CMM_CONSUME: /* Fall-through */
1048 case CMM_RELEASE: /* Fall-through */
1049 case CMM_ACQ_REL: /* Fall-through */
1050 case CMM_SEQ_CST: /* Fall-through */
1051 case CMM_SEQ_CST_FENCE:
1052 break;
1053 default:
1054 abort();
1055 }
1056 }
1057
1058 static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
1059 {
1060 /* NOP. uatomic_sub_return has explicit lock prefix. */
1061 switch (mo) {
1062 case CMM_RELAXED: /* Fall-through */
1063 case CMM_ACQUIRE: /* Fall-through */
1064 case CMM_CONSUME: /* Fall-through */
1065 case CMM_RELEASE: /* Fall-through */
1066 case CMM_ACQ_REL: /* Fall-through */
1067 case CMM_SEQ_CST: /* Fall-through */
1068 case CMM_SEQ_CST_FENCE:
1069 break;
1070 default:
1071 abort();
1072 }
1073 }
1074
1075 static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
1076 {
1077 /* NOP. uatomic_sub_return has explicit lock prefix. */
1078 switch (mo) {
1079 case CMM_RELAXED: /* Fall-through */
1080 case CMM_ACQUIRE: /* Fall-through */
1081 case CMM_CONSUME: /* Fall-through */
1082 case CMM_RELEASE: /* Fall-through */
1083 case CMM_ACQ_REL: /* Fall-through */
1084 case CMM_SEQ_CST: /* Fall-through */
1085 case CMM_SEQ_CST_FENCE:
1086 break;
1087 default:
1088 abort();
1089 }
1090 }
1091
1092 #define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \
1093 do { \
1094 _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \
1095 } while (0)
1096
1097 #define _cmm_compat_c11_smp_mb__after_mo(operation, mo) \
1098 do { \
1099 _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \
1100 } while (0)
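/*
 * Sketch of the intended call pattern (an assumption based on the macro
 * names; the actual call sites live in <urcu/uatomic/generic.h>): a
 * memory-order aware operation brackets the architecture primitive with
 * these hooks, along the lines of
 *
 *      _cmm_compat_c11_smp_mb__before_mo(uatomic_add, mo);
 *      _uatomic_add(addr, v);
 *      _cmm_compat_c11_smp_mb__after_mo(uatomic_add, mo);
 */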
1101
1102
1103 #ifdef __cplusplus
1104 }
1105 #endif
1106
1107 #include <urcu/uatomic/generic.h>
1108
1109 #endif /* _URCU_ARCH_UATOMIC_X86_H */