+// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
+// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
+// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
+// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+//
+// SPDX-License-Identifier: LicenseRef-Boehm-GC
+
#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H
+#include <stdlib.h> /* For abort(3). */
+
/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
* Code inspired from libuatomic_ops-1.2, inherited in part from the
* Boehm-Demers-Weiser conservative garbage collector.
*/
/*
- * The __hp() macro casts the void pointer "x" to a pointer to a structure
+ * The __hp() macro casts the void pointer @x to a pointer to a structure
* containing an array of char of the specified size. This allows passing the
* @addr arguments of the following inline functions as "m" and "+m" operands
- * to the assembly.
+ * to the assembly. The @size parameter should be a constant to support
+ * compilers such as clang, which do not support VLAs. Typedefs are used
+ * because C++ does not allow types to be defined in casts.
*/
-#define __hp(size, x) ((struct { char v[size]; } *)(x))
+typedef struct { char v[1]; } __hp_1;
+typedef struct { char v[2]; } __hp_2;
+typedef struct { char v[4]; } __hp_4;
+typedef struct { char v[8]; } __hp_8;
+
+#define __hp(size, x) ((__hp_##size *)(x))
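+
+/*
+ * Example (illustrative, not part of the API): with a 4-byte operand,
+ * __hp(4, addr) expands to ((__hp_4 *)(addr)), so an operand such as
+ * "+m"(*__hp(4, addr)) exposes exactly 4 bytes at @addr to the inline
+ * assembly.
+ */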
#define _uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
__asm__ __volatile__(
"lock; cmpxchgb %2, %1"
- : "+a"(result), "+m"(*__hp(len, addr))
+ : "+a"(result), "+m"(*__hp(1, addr))
: "q"((unsigned char)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgw %2, %1"
- : "+a"(result), "+m"(*__hp(len, addr))
+ : "+a"(result), "+m"(*__hp(2, addr))
: "r"((unsigned short)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgl %2, %1"
- : "+a"(result), "+m"(*__hp(len, addr))
+ : "+a"(result), "+m"(*__hp(4, addr))
: "r"((unsigned int)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgq %2, %1"
- : "+a"(result), "+m"(*__hp(len, addr))
+ : "+a"(result), "+m"(*__hp(8, addr))
: "r"((unsigned long)_new)
: "memory");
return result;
unsigned char result;
__asm__ __volatile__(
"xchgb %0, %1"
- : "=q"(result), "+m"(*__hp(len, addr))
+ : "=q"(result), "+m"(*__hp(1, addr))
: "0" ((unsigned char)val)
: "memory");
return result;
unsigned short result;
__asm__ __volatile__(
"xchgw %0, %1"
- : "=r"(result), "+m"(*__hp(len, addr))
+ : "=r"(result), "+m"(*__hp(2, addr))
: "0" ((unsigned short)val)
: "memory");
return result;
unsigned int result;
__asm__ __volatile__(
"xchgl %0, %1"
- : "=r"(result), "+m"(*__hp(len, addr))
+ : "=r"(result), "+m"(*__hp(4, addr))
: "0" ((unsigned int)val)
: "memory");
return result;
unsigned long result;
__asm__ __volatile__(
"xchgq %0, %1"
- : "=r"(result), "+m"(*__hp(len, addr))
+ : "=r"(result), "+m"(*__hp(8, addr))
: "0" ((unsigned long)val)
: "memory");
return result;
__asm__ __volatile__(
"lock; xaddb %1, %0"
- : "+m"(*__hp(len, addr)), "+q" (result)
+ : "+m"(*__hp(1, addr)), "+q" (result)
:
: "memory");
return result + (unsigned char)val;
__asm__ __volatile__(
"lock; xaddw %1, %0"
- : "+m"(*__hp(len, addr)), "+r" (result)
+ : "+m"(*__hp(2, addr)), "+r" (result)
:
: "memory");
return result + (unsigned short)val;
__asm__ __volatile__(
"lock; xaddl %1, %0"
- : "+m"(*__hp(len, addr)), "+r" (result)
+ : "+m"(*__hp(4, addr)), "+r" (result)
:
: "memory");
return result + (unsigned int)val;
__asm__ __volatile__(
"lock; xaddq %1, %0"
- : "+m"(*__hp(len, addr)), "+r" (result)
+ : "+m"(*__hp(8, addr)), "+r" (result)
:
: "memory");
return result + (unsigned long)val;
{
__asm__ __volatile__(
"lock; andb %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(1, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andw %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(2, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andl %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(4, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andq %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(8, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orb %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(1, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orw %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(2, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orl %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(4, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orq %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(8, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addb %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(1, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addw %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(2, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addl %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(4, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addq %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(8, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; incb %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(1, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incw %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(2, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incl %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(4, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incq %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(8, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decb %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(1, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decw %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(2, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decl %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(4, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decq %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(8, addr))
:
: "memory");
return;
#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
-#if ((CAA_BITS_PER_LONG != 64) && defined(URCU_ARCH_I386))
+#ifdef URCU_ARCH_X86_NO_CAS
/* For backwards compat */
#define CONFIG_RCU_COMPAT_ARCH 1
#define cmm_smp_mb__before_uatomic_dec() cmm_barrier()
#define cmm_smp_mb__after_uatomic_dec() cmm_barrier()
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
+{
+ /*
+	 * A SMP barrier is not necessary for CMM_SEQ_CST because only a
+	 * previous store can be reordered with the load. However, emitting the
+	 * memory barrier after the store is sufficient to prevent reordering
+	 * between the two. This follows the toolchains' decision of emitting
+	 * the memory fence on the stores instead of the loads.
+ *
+ * A compiler barrier is necessary because the underlying operation does
+ * not clobber the registers.
+ */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ cmm_barrier();
+ break;
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ default:
+ abort();
+ break;
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
+{
+ /*
+ * A SMP barrier is not necessary for CMM_SEQ_CST because following
+ * loads and stores cannot be reordered with the load.
+ *
+ * A SMP barrier is however necessary for CMM_SEQ_CST_FENCE to respect
+ * the memory model, since the underlying operation does not have a lock
+ * prefix.
+ *
+ * A compiler barrier is necessary because the underlying operation does
+ * not clobber the registers.
+ */
+ switch (mo) {
+ case CMM_SEQ_CST_FENCE:
+ cmm_smp_mb();
+ break;
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_SEQ_CST:
+ cmm_barrier();
+ break;
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ default:
+ abort();
+ break;
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo)
+{
+ /*
+ * A SMP barrier is not necessary for CMM_SEQ_CST because the store can
+	 * only be reordered with later loads.
+ *
+ * A compiler barrier is necessary because the underlying operation does
+ * not clobber the registers.
+ */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ cmm_barrier();
+ break;
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ default:
+ abort();
+ break;
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo)
+{
+ /*
+ * A SMP barrier is necessary for CMM_SEQ_CST because the store can be
+	 * reordered with later loads. Since no memory barrier is being emitted
+	 * before loads, one has to be emitted after the store. This follows the
+	 * toolchains' decision of emitting the memory fence on the stores instead
+ * of the loads.
+ *
+ * A SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the
+ * memory model, since the underlying store does not have a lock prefix.
+ *
+ * A compiler barrier is necessary because the underlying operation does
+ * not clobber the registers.
+ */
+ switch (mo) {
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ cmm_smp_mb();
+ break;
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_RELEASE:
+ cmm_barrier();
+ break;
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ default:
+ abort();
+ break;
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_xchg has implicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_xchg has implicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_cmpxchg has implicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_cmpxchg has implicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_and has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_and has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_or has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_or has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_add has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_add has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_sub has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_sub has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_inc has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_inc has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_dec has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_dec has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_add_return has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_add_return has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_sub_return has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_sub_return has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+#define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \
+ do { \
+ _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \
+ } while (0)
+
+#define _cmm_compat_c11_smp_mb__after_mo(operation, mo) \
+ do { \
+ _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \
+ } while (0)
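+
+/*
+ * Example (illustrative only):
+ * _cmm_compat_c11_smp_mb__before_mo(uatomic_set, CMM_SEQ_CST) expands to
+ * _cmm_compat_c11_smp_mb__before_uatomic_set_mo(CMM_SEQ_CST), which emits
+ * the compiler barrier required before a CMM_SEQ_CST uatomic_set store.
+ */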
+
+
#ifdef __cplusplus
}
#endif