// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LicenseRef-Boehm-GC

#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

#include <stdlib.h>		/* For abort(3). */

/*
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/arch.h>
#include <urcu/config.h>
#include <urcu/compiler.h>
#include <urcu/system.h>

#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT
/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

/*
 * The __hp() macro casts the void pointer @x to a pointer to a structure
 * containing an array of char of the specified size. This allows passing the
 * @addr arguments of the following inline functions as "m" and "+m" operands
 * to the assembly. The @size parameter should be a constant to support
 * compilers such as clang which do not support VLA. Create typedefs because
 * C++ does not allow types to be defined in casts.
 */

typedef struct { char v[1]; } __hp_1;
typedef struct { char v[2]; } __hp_2;
typedef struct { char v[4]; } __hp_4;
typedef struct { char v[8]; } __hp_8;

#define __hp(size, x)	((__hp_##size *)(x))

#define _uatomic_set(addr, v)	((void) CMM_STORE_SHARED(*(addr), (v)))
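/*
 * Illustrative note: the inline functions below use __hp() to give the
 * compiler an lvalue of the exact access width, e.g. a 4-byte access is
 * expressed as the operand
 *
 *	"+m"(*__hp(4, addr))
 *
 * so the asm constraint covers exactly the 4 bytes being read and written.
 */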
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
				unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(1, addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(2, addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(4, addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(8, addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr),		       \
						caa_cast_long_keep_sign(old),  \
						caa_cast_long_keep_sign(_new), \
						sizeof(*(addr))))
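/*
 * Usage sketch (illustrative; "count" is a hypothetical caller-side
 * variable): the cmpxchg primitive returns the value the location held
 * before the operation, so a successful swap is detected by comparing the
 * return value with the expected old value:
 *
 *	unsigned long count = 0;
 *
 *	if (uatomic_cmpxchg(&count, 0, 1) == 0) {
 *		... this caller performed the 0 -> 1 transition ...
 *	}
 */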
/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(1, addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(2, addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(4, addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(8, addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_xchg(addr, v)						       \
	((__typeof__(*(addr))) __uatomic_exchange((addr),		       \
						caa_cast_long_keep_sign(v),    \
						sizeof(*(addr))))
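/*
 * Usage sketch (illustrative; "flag" is a hypothetical variable): the
 * exchange primitive stores the new value and returns the previous one. On
 * x86, xchg with a memory operand is implicitly locked, which is why no
 * "lock" prefix appears above:
 *
 *	int flag = 0;
 *	int was_set = uatomic_xchg(&flag, 1);
 */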
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				   int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(1, addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(2, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(4, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(8, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_add_return(addr, v)					       \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		       \
						caa_cast_long_keep_sign(v),    \
						sizeof(*(addr))))
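/*
 * Usage sketch (illustrative; "refcount" is a hypothetical variable):
 * add_return yields the value after the addition; the code above
 * reconstructs it by adding @val to the pre-add value that xadd leaves in
 * the register operand:
 *
 *	long refcount = 1;
 *
 *	if (uatomic_add_return(&refcount, -1) == 0) {
 *		... last reference dropped ...
 *	}
 */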
/* uatomic_and */

static inline __attribute__((always_inline))
void __uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; andb %1, %0"
			: "=m"(*__hp(1, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; andw %1, %0"
			: "=m"(*__hp(2, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; andl %1, %0"
			: "=m"(*__hp(4, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; andq %1, %0"
			: "=m"(*__hp(8, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_and(addr, v)						       \
	(__uatomic_and((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
/* uatomic_or */

static inline __attribute__((always_inline))
void __uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; orb %1, %0"
			: "=m"(*__hp(1, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; orw %1, %0"
			: "=m"(*__hp(2, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; orl %1, %0"
			: "=m"(*__hp(4, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; orq %1, %0"
			: "=m"(*__hp(8, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_or(addr, v)						       \
	(__uatomic_or((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
/* uatomic_add */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(1, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(2, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(4, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(8, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_add(addr, v)						       \
	(__uatomic_add((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(1, addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(2, addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(4, addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(8, addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))
/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(1, addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(2, addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(4, addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(8, addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))
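/*
 * Usage sketch (illustrative; "counter" is a hypothetical variable): the
 * void-returning helpers are intended for counters whose updated value is
 * not needed at the call site:
 *
 *	unsigned long counter = 0;
 *
 *	uatomic_inc(&counter);
 *	uatomic_add(&counter, 16);
 *	uatomic_dec(&counter);
 */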
#ifdef URCU_ARCH_X86_NO_CAS

/* For backwards compat */
#define CONFIG_RCU_COMPAT_ARCH 1

extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

#define UATOMIC_COMPAT(insn)						       \
	((caa_likely(__rcu_cas_avail > 0))				       \
		? (_uatomic_##insn)					       \
		: ((caa_unlikely(__rcu_cas_avail < 0)			       \
			? ((__rcu_cas_init() > 0)			       \
				? (_uatomic_##insn)			       \
				: (compat_uatomic_##insn))		       \
			: (compat_uatomic_##insn))))
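/*
 * Dispatch sketch (illustrative): UATOMIC_COMPAT(inc(addr)) first checks the
 * cached __rcu_cas_avail result. A positive value selects the native
 * _uatomic_inc(addr); a negative value means detection has not run yet, so
 * __rcu_cas_init() is invoked and its result decides; otherwise the
 * cmpxchg-free fallback compat_uatomic_inc(addr) is used.
 */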
/*
 * We leave the return value so we don't break the ABI, but remove the
 * return value from the API.
 */
extern unsigned long _compat_uatomic_set(void *addr,
					 unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)					       \
	((void) _compat_uatomic_set((addr),				       \
				caa_cast_long_keep_sign(_new),		       \
				sizeof(*(addr))))


extern unsigned long _compat_uatomic_xchg(void *addr,
					  unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)					       \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		       \
						caa_cast_long_keep_sign(_new), \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					     unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		       \
						caa_cast_long_keep_sign(old),  \
						caa_cast_long_keep_sign(_new), \
						sizeof(*(addr))))

extern void _compat_uatomic_and(void *addr, unsigned long _new, int len);
#define compat_uatomic_and(addr, v)					       \
	(_compat_uatomic_and((addr),					       \
			caa_cast_long_keep_sign(v),			       \
			sizeof(*(addr))))

extern void _compat_uatomic_or(void *addr, unsigned long _new, int len);
#define compat_uatomic_or(addr, v)					       \
	(_compat_uatomic_or((addr),					       \
			caa_cast_long_keep_sign(v),			       \
			sizeof(*(addr))))

extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v)				       \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	       \
						caa_cast_long_keep_sign(v),    \
						sizeof(*(addr))))

#define compat_uatomic_add(addr, v)					       \
		((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)					       \
		(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)					       \
		(compat_uatomic_add((addr), -1))
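/*
 * Expansion sketch (illustrative): the arithmetic compat helpers are all
 * built on the add_return primitive, e.g.
 *
 *	compat_uatomic_inc(addr)
 *	-> compat_uatomic_add((addr), 1)
 *	-> (void) compat_uatomic_add_return((addr), 1)
 */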
#else
#define UATOMIC_COMPAT(insn)	(_uatomic_##insn)
#endif

/* Read is atomic even in compat mode */
#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))

#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))

#define uatomic_and(addr, v)			\
		UATOMIC_COMPAT(and(addr, v))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#define uatomic_or(addr, v)			\
		UATOMIC_COMPAT(or(addr, v))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()

#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define cmm_smp_mb__before_uatomic_inc()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_inc()		cmm_barrier()

#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))
#define cmm_smp_mb__before_uatomic_dec()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_dec()		cmm_barrier()
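/*
 * Usage sketch (illustrative; "counter" and "ready" are hypothetical
 * variables): on x86 the lock-prefixed instructions already imply a full
 * memory barrier, so the pairing macros above only need to stop compiler
 * reordering:
 *
 *	uatomic_inc(&counter);
 *	cmm_smp_mb__after_uatomic_inc();	(expands to cmm_barrier() here)
 *	CMM_STORE_SHARED(ready, 1);
 */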
static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
{
	/*
	 * A SMP barrier is not necessary for CMM_SEQ_CST because only a
	 * previous store can be reordered with the load. However, emitting the
	 * memory barrier after the store is sufficient to prevent reordering
	 * between the two. This follows toolchains' decision of emitting the
	 * memory fence on the stores instead of the loads.
	 *
	 * A compiler barrier is necessary because the underlying operation does
	 * not clobber the registers.
	 */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		cmm_barrier();
		break;
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	default:
		abort();
	}
}
static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
{
	/*
	 * A SMP barrier is not necessary for CMM_SEQ_CST because following
	 * loads and stores cannot be reordered with the load.
	 *
	 * A SMP barrier is however necessary for CMM_SEQ_CST_FENCE to respect
	 * the memory model, since the underlying operation does not have a lock
	 * prefix.
	 *
	 * A compiler barrier is necessary because the underlying operation does
	 * not clobber the registers.
	 */
	switch (mo) {
	case CMM_SEQ_CST_FENCE:
		cmm_smp_mb();
		break;
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_SEQ_CST:
		cmm_barrier();
		break;
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	default:
		abort();
	}
}
static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo)
{
	/*
	 * A SMP barrier is not necessary for CMM_SEQ_CST because the store can
	 * only be reordered with later loads.
	 *
	 * A compiler barrier is necessary because the underlying operation does
	 * not clobber the registers.
	 */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		cmm_barrier();
		break;
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	default:
		abort();
	}
}
static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo)
{
	/*
	 * A SMP barrier is necessary for CMM_SEQ_CST because the store can be
	 * reordered with later loads. Since no memory barrier is being emitted
	 * before loads, one has to be emitted after the store. This follows
	 * toolchains' decision of emitting the memory fence on the stores instead
	 * of the loads.
	 *
	 * A SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the
	 * memory model, since the underlying store does not have a lock prefix.
	 *
	 * A compiler barrier is necessary because the underlying operation does
	 * not clobber the registers.
	 */
	switch (mo) {
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		cmm_smp_mb();
		break;
	case CMM_RELAXED:	/* Fall-through */
	case CMM_RELEASE:
		cmm_barrier();
		break;
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	default:
		abort();
	}
}
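/*
 * Illustrative note (assuming the memory-order-aware load/store wrappers
 * provided by <urcu/uatomic/generic.h>): for a CMM_SEQ_CST store followed
 * by a CMM_SEQ_CST load, the cmm_smp_mb() is emitted by the "after set"
 * helper above, while the load side only gets a compiler barrier. Placing
 * the fence on the store side is sufficient on x86-TSO, where only
 * store->load reordering can happen for plain accesses.
 */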
static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_xchg has implicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_xchg has implicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_cmpxchg has implicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_cmpxchg has implicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}
static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_and has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_and has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_or has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_or has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}
static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_add has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_add has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_sub has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_sub has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}
static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_inc has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_inc has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_dec has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_dec has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}
static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_add_return has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_add_return has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_sub_return has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}

static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
{
	/* NOP. uatomic_sub_return has explicit lock prefix. */
	switch (mo) {
	case CMM_RELAXED:	/* Fall-through */
	case CMM_ACQUIRE:	/* Fall-through */
	case CMM_CONSUME:	/* Fall-through */
	case CMM_RELEASE:	/* Fall-through */
	case CMM_ACQ_REL:	/* Fall-through */
	case CMM_SEQ_CST:	/* Fall-through */
	case CMM_SEQ_CST_FENCE:
		break;
	default:
		abort();
	}
}
#define _cmm_compat_c11_smp_mb__before_mo(operation, mo)		       \
	do {								       \
		_cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo);      \
	} while (0)

#define _cmm_compat_c11_smp_mb__after_mo(operation, mo)			       \
	do {								       \
		_cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo);       \
	} while (0)
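/*
 * Dispatch sketch (illustrative): the token-pasting wrappers above let the
 * generic uatomic layer pick the per-operation helper from an operation
 * name and a memory order, e.g.
 *
 *	_cmm_compat_c11_smp_mb__before_mo(uatomic_set, CMM_SEQ_CST);
 *
 * expands to
 *
 *	do { _cmm_compat_c11_smp_mb__before_uatomic_set_mo(CMM_SEQ_CST); } while (0);
 */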
#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */