#define uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
#endif
+/*
+ * Can be defined for the architecture.
+ *
+ * What needs to be emitted _before_ the `operation' with memory ordering `mo'.
+ *
+ * The default emits cmm_smp_mb() regardless of `mo' — conservative for the
+ * weaker orderings, but always safe (assumes cmm_smp_mb() is a full
+ * barrier). Architectures may override this with a weaker mapping.
+ */
+#ifndef _cmm_compat_c11_smp_mb__before_mo
+# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb()
+#endif
+
+/*
+ * Can be defined for the architecture.
+ *
+ * What needs to be emitted _after_ the `operation' with memory ordering `mo'.
+ *
+ * The default emits cmm_smp_mb() regardless of `mo' — conservative for the
+ * weaker orderings, but always safe (assumes cmm_smp_mb() is a full
+ * barrier). Architectures may override this with a weaker mapping.
+ */
+#ifndef _cmm_compat_c11_smp_mb__after_mo
+# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb()
+#endif
+
+/*
+ * Evaluate the read-modify-write `op(addr, v)' bracketed by the barriers
+ * required for memory ordering `mo', and yield the operation's return value.
+ *
+ * Fix: `__typeof__((*addr))' mis-parenthesized the macro argument — for an
+ * expression argument such as `p + i' it expanded to `(*p + i)', the wrong
+ * type. The argument must be parenthesized before dereference: `*(addr)'.
+ */
+#define uatomic_load_store_return_op(op, addr, v, mo)		\
+	__extension__						\
+	({							\
+		_cmm_compat_c11_smp_mb__before_mo(op, mo);	\
+		__typeof__(*(addr)) _value = op(addr, v);	\
+		_cmm_compat_c11_smp_mb__after_mo(op, mo);	\
+								\
+		_value;						\
+	})
+
+/*
+ * Evaluate the store-like operation `op(addr, v)' bracketed by the barriers
+ * required for memory ordering `mo'. No value is yielded.
+ */
+#define uatomic_load_store_op(op, addr, v, mo)		\
+	do {						\
+		_cmm_compat_c11_smp_mb__before_mo(op, mo);	\
+		op(addr, v);				\
+		_cmm_compat_c11_smp_mb__after_mo(op, mo);	\
+	} while (0)
+
+/*
+ * Store `v' to `*addr' with memory ordering `mo', implemented as a plain
+ * uatomic_set() bracketed by the barriers `mo' requires.
+ */
+#define uatomic_store(addr, v, mo)				\
+	do {							\
+		_cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo);	\
+		uatomic_set(addr, v);				\
+		_cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo);	\
+	} while (0)
+
+/*
+ * Read-modify-write operations taking an explicit memory ordering `mo'.
+ * Each wraps the corresponding plain uatomic_* operation with the barriers
+ * `mo' requires.
+ */
+#define uatomic_and_mo(addr, v, mo)				\
+	uatomic_load_store_op(uatomic_and, addr, v, mo)
+
+#define uatomic_or_mo(addr, v, mo)				\
+	uatomic_load_store_op(uatomic_or, addr, v, mo)
+
+#define uatomic_add_mo(addr, v, mo)				\
+	uatomic_load_store_op(uatomic_add, addr, v, mo)
+
+#define uatomic_sub_mo(addr, v, mo)				\
+	uatomic_load_store_op(uatomic_sub, addr, v, mo)
+
+#define uatomic_inc_mo(addr, mo)				\
+	uatomic_load_store_op(uatomic_add, addr, 1, mo)
+
+/* Decrement is expressed as an atomic add of -1. */
+#define uatomic_dec_mo(addr, mo)				\
+	uatomic_load_store_op(uatomic_add, addr, -1, mo)
+/*
+ * Compare-and-exchange with explicit memory orderings: `mos' applies on
+ * success (the value read matched `old'), `mof' on failure. The before
+ * barrier uses `mos' unconditionally; the after barrier is chosen from the
+ * actual outcome. Yields the value previously stored at `addr'.
+ *
+ * NOTE: We cannot just do switch (_value == (old) ? mos : mof) otherwise the
+ * compiler emits a -Wduplicated-cond warning.
+ */
+#define uatomic_cmpxchg_mo(addr, old, new, mos, mof)			\
+	__extension__							\
+	({								\
+		_cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \
+		__typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old,	\
+							new);		\
+									\
+		if (_value == (old)) {					\
+			_cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \
+		} else {						\
+			_cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \
+		}							\
+		_value;							\
+	})
+
+/* Exchange with memory ordering `mo'; yields the previous value. */
+#define uatomic_xchg_mo(addr, v, mo)				\
+	uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)
+
+/*
+ * Add `v' to `*addr' with memory ordering `mo'; yields the new value.
+ *
+ * Fix: the `mo' argument was dropped when forwarding to the 4-parameter
+ * uatomic_load_store_return_op(), so any expansion failed (too few
+ * arguments) and the requested ordering was lost.
+ */
+#define uatomic_add_return_mo(addr, v, mo)			\
+	uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)
+
+/*
+ * Subtract `v' from `*addr' with memory ordering `mo'; yields the new value.
+ *
+ * Fix: the `mo' argument was dropped when forwarding to the 4-parameter
+ * uatomic_load_store_return_op(), so any expansion failed (too few
+ * arguments) and the requested ordering was lost.
+ */
+#define uatomic_sub_return_mo(addr, v, mo)			\
+	uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)
+
#ifndef uatomic_read
#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
#endif
+/*
+ * Load `*addr' with memory ordering `mo', implemented as a plain
+ * uatomic_read() bracketed by the barriers `mo' requires. Yields the value
+ * read.
+ */
+#define uatomic_load(addr, mo)						\
+	__extension__							\
+	({								\
+		_cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo);	\
+		__typeof__(*(addr)) _rcu_value = uatomic_read(addr);	\
+		_cmm_compat_c11_smp_mb__after_mo(uatomic_read, mo);	\
+									\
+		_rcu_value;						\
+	})
+
#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
#ifdef ILLEGAL_INSTR
static inline __attribute__((always_inline))