diff --git a/include/urcu/uatomic/x86.h b/include/urcu/uatomic/x86.h
index 2a4ea1c032a6019f0257ca967200e28fee27edaa..616eee9be3ca60b4be62c8a10cadb0aff2d1ea8d 100644
--- a/include/urcu/uatomic/x86.h
+++ b/include/urcu/uatomic/x86.h
@@ -1,25 +1,21 @@
+// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation.  All rights reserved.
+// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics.  All rights reserved.
+// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
+// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+//
+// SPDX-License-Identifier: LicenseRef-Boehm-GC
+
 #ifndef _URCU_ARCH_UATOMIC_X86_H
 #define _URCU_ARCH_UATOMIC_X86_H
 
+#include <stdlib.h>            /* For abort(3). */
+
 /*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
  * Code inspired from libuatomic_ops-1.2, inherited in part from the
  * Boehm-Demers-Weiser conservative garbage collector.
  */
 
+#include <urcu/arch.h>
 #include <urcu/config.h>
 #include <urcu/compiler.h>
 #include <urcu/system.h>
@@ -35,10 +31,21 @@ extern "C" {
  * Derived from AO_compare_and_swap() and AO_test_and_set_full().
  */
 
-struct __uatomic_dummy {
-       unsigned long v[10];
-};
-#define __hp(x)        ((struct __uatomic_dummy *)(x))
+/*
+ * The __hp() macro casts the void pointer @x to a pointer to a structure
+ * containing an array of char of the specified size. This allows passing the
+ * @addr arguments of the following inline functions as "m" and "+m" operands
+ * to the assembly. The @size parameter should be a constant to support
+ * compilers such as clang, which do not support VLAs. Typedefs are used
+ * because C++ does not allow types to be defined within casts.
+ */
+
+typedef struct { char v[1]; } __hp_1;
+typedef struct { char v[2]; } __hp_2;
+typedef struct { char v[4]; } __hp_4;
+typedef struct { char v[8]; } __hp_8;
+
+#define __hp(size, x)  ((__hp_##size *)(x))
 
 #define _uatomic_set(addr, v)  ((void) CMM_STORE_SHARED(*(addr), (v)))
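What the sized typedefs buy can be seen in a standalone sketch: dereferencing __hp(4, addr) yields an lvalue that is exactly 4 bytes wide, so an "m"/"+m" constraint tells the compiler precisely which bytes the instruction reads and writes. The sketch below mirrors the idea outside the patch; hp4_t, HP4() and atomic_inc_u32() are hypothetical names, and x86 with a GNU-style inline-asm compiler (gcc or clang) is assumed.

#include <stdio.h>

typedef struct { char v[4]; } hp4_t;    /* mirrors __hp_4 above */
#define HP4(x)  ((hp4_t *)(x))

static inline void atomic_inc_u32(void *addr)
{
        /* "+m"(*HP4(addr)) marks exactly 4 bytes at addr as read and written. */
        __asm__ __volatile__(
        "lock; incl %0"
                : "+m"(*HP4(addr))
                :
                : "memory");
}

int main(void)
{
        unsigned int counter = 41;

        atomic_inc_u32(&counter);
        printf("%u\n", counter);        /* prints 42 */
        return 0;
}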
 
@@ -55,7 +62,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 
                __asm__ __volatile__(
                "lock; cmpxchgb %2, %1"
-                       : "+a"(result), "+m"(*__hp(addr))
+                       : "+a"(result), "+m"(*__hp(1, addr))
                        : "q"((unsigned char)_new)
                        : "memory");
                return result;
@@ -66,7 +73,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 
                __asm__ __volatile__(
                "lock; cmpxchgw %2, %1"
-                       : "+a"(result), "+m"(*__hp(addr))
+                       : "+a"(result), "+m"(*__hp(2, addr))
                        : "r"((unsigned short)_new)
                        : "memory");
                return result;
@@ -77,7 +84,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 
                __asm__ __volatile__(
                "lock; cmpxchgl %2, %1"
-                       : "+a"(result), "+m"(*__hp(addr))
+                       : "+a"(result), "+m"(*__hp(4, addr))
                        : "r"((unsigned int)_new)
                        : "memory");
                return result;
@@ -89,7 +96,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 
                __asm__ __volatile__(
                "lock; cmpxchgq %2, %1"
-                       : "+a"(result), "+m"(*__hp(addr))
+                       : "+a"(result), "+m"(*__hp(8, addr))
                        : "r"((unsigned long)_new)
                        : "memory");
                return result;
@@ -122,7 +129,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
                unsigned char result;
                __asm__ __volatile__(
                "xchgb %0, %1"
-                       : "=q"(result), "+m"(*__hp(addr))
+                       : "=q"(result), "+m"(*__hp(1, addr))
                        : "0" ((unsigned char)val)
                        : "memory");
                return result;
@@ -132,7 +139,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
                unsigned short result;
                __asm__ __volatile__(
                "xchgw %0, %1"
-                       : "=r"(result), "+m"(*__hp(addr))
+                       : "=r"(result), "+m"(*__hp(2, addr))
                        : "0" ((unsigned short)val)
                        : "memory");
                return result;
@@ -142,7 +149,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
                unsigned int result;
                __asm__ __volatile__(
                "xchgl %0, %1"
-                       : "=r"(result), "+m"(*__hp(addr))
+                       : "=r"(result), "+m"(*__hp(4, addr))
                        : "0" ((unsigned int)val)
                        : "memory");
                return result;
@@ -153,7 +160,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
                unsigned long result;
                __asm__ __volatile__(
                "xchgq %0, %1"
-                       : "=r"(result), "+m"(*__hp(addr))
+                       : "=r"(result), "+m"(*__hp(8, addr))
                        : "0" ((unsigned long)val)
                        : "memory");
                return result;
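These size-dispatched primitives back the generic uatomic_cmpxchg() and uatomic_xchg() wrappers, which select the 1-, 2-, 4- or 8-byte path from sizeof(*addr); note that xchg with a memory operand is implicitly locked, which is why no lock prefix appears in the exchange templates. A minimal usage sketch of the public API, assuming only <urcu/uatomic.h>; the retry loop is illustrative and not liburcu code:

#include <urcu/uatomic.h>

/* Increment *ctr only while it stays below limit; returns 1 on success. */
static int try_inc_below(unsigned long *ctr, unsigned long limit)
{
        unsigned long old, seen;

        old = uatomic_read(ctr);
        for (;;) {
                if (old >= limit)
                        return 0;                       /* limit reached */
                seen = uatomic_cmpxchg(ctr, old, old + 1);
                if (seen == old)
                        return 1;                       /* our CAS won */
                old = seen;                             /* lost the race, retry */
        }
}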
@@ -186,7 +193,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 
                __asm__ __volatile__(
                "lock; xaddb %1, %0"
-                       : "+m"(*__hp(addr)), "+q" (result)
+                       : "+m"(*__hp(1, addr)), "+q" (result)
                        :
                        : "memory");
                return result + (unsigned char)val;
@@ -197,7 +204,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 
                __asm__ __volatile__(
                "lock; xaddw %1, %0"
-                       : "+m"(*__hp(addr)), "+r" (result)
+                       : "+m"(*__hp(2, addr)), "+r" (result)
                        :
                        : "memory");
                return result + (unsigned short)val;
@@ -208,7 +215,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 
                __asm__ __volatile__(
                "lock; xaddl %1, %0"
-                       : "+m"(*__hp(addr)), "+r" (result)
+                       : "+m"(*__hp(4, addr)), "+r" (result)
                        :
                        : "memory");
                return result + (unsigned int)val;
@@ -220,7 +227,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 
                __asm__ __volatile__(
                "lock; xaddq %1, %0"
-                       : "+m"(*__hp(addr)), "+r" (result)
+                       : "+m"(*__hp(8, addr)), "+r" (result)
                        :
                        : "memory");
                return result + (unsigned long)val;
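Because xadd leaves the pre-add value in the register operand, each branch adds val back before returning, so callers observe the post-add value. A short usage sketch of the corresponding public helper; nr_events and record_events() are hypothetical names and <urcu/uatomic.h> is assumed:

#include <urcu/uatomic.h>

static unsigned long nr_events;         /* shared between threads */

static unsigned long record_events(unsigned long n)
{
        /* Returns the counter value after adding n, as one atomic step. */
        return uatomic_add_return(&nr_events, n);
}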
@@ -250,7 +257,7 @@ void __uatomic_and(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; andb %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(1, addr))
                        : "iq" ((unsigned char)val)
                        : "memory");
                return;
@@ -259,7 +266,7 @@ void __uatomic_and(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; andw %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(2, addr))
                        : "ir" ((unsigned short)val)
                        : "memory");
                return;
@@ -268,7 +275,7 @@ void __uatomic_and(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; andl %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(4, addr))
                        : "ir" ((unsigned int)val)
                        : "memory");
                return;
@@ -278,7 +285,7 @@ void __uatomic_and(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; andq %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(8, addr))
                        : "er" ((unsigned long)val)
                        : "memory");
                return;
@@ -306,7 +313,7 @@ void __uatomic_or(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; orb %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(1, addr))
                        : "iq" ((unsigned char)val)
                        : "memory");
                return;
@@ -315,7 +322,7 @@ void __uatomic_or(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; orw %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(2, addr))
                        : "ir" ((unsigned short)val)
                        : "memory");
                return;
@@ -324,7 +331,7 @@ void __uatomic_or(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; orl %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(4, addr))
                        : "ir" ((unsigned int)val)
                        : "memory");
                return;
@@ -334,7 +341,7 @@ void __uatomic_or(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; orq %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(8, addr))
                        : "er" ((unsigned long)val)
                        : "memory");
                return;
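The lock-prefixed and/or templates are what the generic uatomic_and() and uatomic_or() wrappers expand to; a typical use is atomic flag manipulation on a shared word. A hedged sketch with hypothetical flag names, assuming <urcu/uatomic.h>:

#include <urcu/uatomic.h>

#define WORKER_RUNNING  (1UL << 0)      /* hypothetical flag bits */
#define WORKER_DRAINING (1UL << 1)

static unsigned long worker_flags = WORKER_RUNNING;

static void worker_start_draining(void)
{
        uatomic_or(&worker_flags, WORKER_DRAINING);     /* atomically set the bit */
}

static void worker_stop(void)
{
        uatomic_and(&worker_flags, ~WORKER_RUNNING);    /* atomically clear the bit */
}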
@@ -362,7 +369,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; addb %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(1, addr))
                        : "iq" ((unsigned char)val)
                        : "memory");
                return;
@@ -371,7 +378,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; addw %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(2, addr))
                        : "ir" ((unsigned short)val)
                        : "memory");
                return;
@@ -380,7 +387,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; addl %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(4, addr))
                        : "ir" ((unsigned int)val)
                        : "memory");
                return;
@@ -390,7 +397,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
        {
                __asm__ __volatile__(
                "lock; addq %1, %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(8, addr))
                        : "er" ((unsigned long)val)
                        : "memory");
                return;
@@ -419,7 +426,7 @@ void __uatomic_inc(void *addr, int len)
        {
                __asm__ __volatile__(
                "lock; incb %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(1, addr))
                        :
                        : "memory");
                return;
@@ -428,7 +435,7 @@ void __uatomic_inc(void *addr, int len)
        {
                __asm__ __volatile__(
                "lock; incw %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(2, addr))
                        :
                        : "memory");
                return;
@@ -437,7 +444,7 @@ void __uatomic_inc(void *addr, int len)
        {
                __asm__ __volatile__(
                "lock; incl %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(4, addr))
                        :
                        : "memory");
                return;
@@ -447,7 +454,7 @@ void __uatomic_inc(void *addr, int len)
        {
                __asm__ __volatile__(
                "lock; incq %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(8, addr))
                        :
                        : "memory");
                return;
@@ -472,7 +479,7 @@ void __uatomic_dec(void *addr, int len)
        {
                __asm__ __volatile__(
                "lock; decb %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(1, addr))
                        :
                        : "memory");
                return;
@@ -481,7 +488,7 @@ void __uatomic_dec(void *addr, int len)
        {
                __asm__ __volatile__(
                "lock; decw %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(2, addr))
                        :
                        : "memory");
                return;
@@ -490,7 +497,7 @@ void __uatomic_dec(void *addr, int len)
        {
                __asm__ __volatile__(
                "lock; decl %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(4, addr))
                        :
                        : "memory");
                return;
@@ -500,7 +507,7 @@ void __uatomic_dec(void *addr, int len)
        {
                __asm__ __volatile__(
                "lock; decq %0"
-                       : "=m"(*__hp(addr))
+                       : "=m"(*__hp(8, addr))
                        :
                        : "memory");
                return;
@@ -517,7 +524,11 @@ void __uatomic_dec(void *addr, int len)
 
 #define _uatomic_dec(addr)     (__uatomic_dec((addr), sizeof(*(addr))))
 
-#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+#ifdef URCU_ARCH_X86_NO_CAS
+
+/* For backwards compat */
+#define CONFIG_RCU_COMPAT_ARCH 1
+
 extern int __rcu_cas_avail;
 extern int __rcu_cas_init(void);
 
@@ -621,6 +632,474 @@ extern unsigned long _compat_uatomic_add_return(void *addr,
 #define cmm_smp_mb__before_uatomic_dec()       cmm_barrier()
 #define cmm_smp_mb__after_uatomic_dec()                cmm_barrier()
 
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
+{
+       /*
+        * An SMP barrier is not necessary for CMM_SEQ_CST because only a
+        * previous store can be reordered with the load.  However, emitting the
+        * memory barrier after the store is sufficient to prevent reordering
+        * between the two.  This follows the toolchains' decision to emit the
+        * memory fence on the stores instead of the loads.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
+{
+       /*
+        * An SMP barrier is not necessary for CMM_SEQ_CST because following
+        * loads and stores cannot be reordered with the load.
+        *
+        * An SMP barrier is, however, necessary for CMM_SEQ_CST_FENCE to respect
+        * the memory model, since the underlying operation does not have a lock
+        * prefix.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_SEQ_CST_FENCE:
+               cmm_smp_mb();
+               break;
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_SEQ_CST:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo)
+{
+       /*
+        * An SMP barrier is not necessary for CMM_SEQ_CST because the store can
+        * only be reordered with later loads.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo)
+{
+       /*
+        * An SMP barrier is necessary for CMM_SEQ_CST because the store can be
+        * reordered with later loads.  Since no memory barrier is being emitted
+        * before loads, one has to be emitted after the store.  This follows
+        * the toolchains' decision to emit the memory fence on the stores
+        * instead of the loads.
+        *
+        * An SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the
+        * memory model, since the underlying store does not have a lock prefix.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               cmm_smp_mb();
+               break;
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_RELEASE:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_xchg has implicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_xchg has implicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_cmpxchg has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_cmpxchg has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_and has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_and has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_or has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_or has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_inc has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_inc has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_dec has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_dec has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+#define _cmm_compat_c11_smp_mb__before_mo(operation, mo)                       \
+       do {                                                    \
+               _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo);       \
+       } while (0)
+
+#define _cmm_compat_c11_smp_mb__after_mo(operation, mo)                        \
+       do {                                                    \
+               _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo);        \
+       } while (0)
+
+
 #ifdef __cplusplus
 }
 #endif
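The _cmm_compat_c11_smp_mb__{before,after}_mo() macros paste the operation name into the matching helper above, so a memory-order-aware caller can bracket a plain primitive with exactly the fences x86 needs for the requested ordering. The sketch below shows that bracketing pattern as if it were written inside this header; store_seq_cst() is illustrative only, not the actual liburcu caller:

static inline void store_seq_cst(unsigned long *addr, unsigned long v)
{
        /*
         * Expands to _cmm_compat_c11_smp_mb__before_uatomic_set_mo(CMM_SEQ_CST):
         * a compiler barrier, per the switch above.
         */
        _cmm_compat_c11_smp_mb__before_mo(uatomic_set, CMM_SEQ_CST);

        _uatomic_set(addr, v);

        /*
         * Expands to _cmm_compat_c11_smp_mb__after_uatomic_set_mo(CMM_SEQ_CST):
         * a full cmm_smp_mb(), since a plain store needs a trailing fence to be
         * sequentially consistent on x86.
         */
        _cmm_compat_c11_smp_mb__after_mo(uatomic_set, CMM_SEQ_CST);
}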