+// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
+// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
+// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
+// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+// SPDX-FileCopyrightText: 2010 Paolo Bonzini
+//
+// SPDX-License-Identifier: LicenseRef-Boehm-GC
+
#ifndef _URCU_UATOMIC_GENERIC_H
#define _URCU_UATOMIC_GENERIC_H
/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- * Copyright (c) 2010 Paolo Bonzini
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
* Code inspired from libuatomic_ops-1.2, inherited in part from the
* Boehm-Demers-Weiser conservative garbage collector.
*/
#include <stdint.h>
+#include <stdlib.h>
#include <urcu/compiler.h>
#include <urcu/system.h>
#define uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
#endif
+/*
+ * uatomic_load_store_return_op - apply the atomic read-modify-write
+ * operation `op` to `addr` with operand `v` under memory order `mo`,
+ * returning the operation's result.
+ *
+ * A full barrier is emitted before the operation for release-type
+ * orders, and after it for acquire-type orders (a data-dependency
+ * barrier for CMM_CONSUME).  An unknown memory order aborts.  `mo` is
+ * expected to be a compile-time constant so the switches fold away.
+ */
+#define uatomic_load_store_return_op(op, addr, v, mo)		\
+	__extension__						\
+	({							\
+		switch (mo) {					\
+		case CMM_ACQUIRE:				\
+		case CMM_CONSUME:				\
+		case CMM_RELAXED:				\
+			break;					\
+		case CMM_RELEASE:				\
+		case CMM_ACQ_REL:				\
+		case CMM_SEQ_CST:				\
+		case CMM_SEQ_CST_FENCE:				\
+			cmm_smp_mb();				\
+			break;					\
+		default:					\
+			abort();				\
+		}						\
+								\
+		__typeof__(*(addr)) _value = op(addr, v);	\
+								\
+		switch (mo) {					\
+		case CMM_CONSUME:				\
+			cmm_smp_read_barrier_depends();		\
+			break;					\
+		case CMM_ACQUIRE:				\
+		case CMM_ACQ_REL:				\
+		case CMM_SEQ_CST:				\
+		case CMM_SEQ_CST_FENCE:				\
+			cmm_smp_mb();				\
+			break;					\
+		case CMM_RELAXED:				\
+		case CMM_RELEASE:				\
+			break;					\
+		default:					\
+			abort();				\
+		}						\
+		_value;						\
+	})
+
+/*
+ * uatomic_load_store_op - apply the atomic read-modify-write operation
+ * `op` to `addr` with operand `v` under memory order `mo`, discarding
+ * the result.  A full barrier is emitted before the operation for
+ * release-type orders and after it for acquire-type orders (a
+ * data-dependency barrier for CMM_CONSUME).  An unknown memory order
+ * aborts.
+ */
+#define uatomic_load_store_op(op, addr, v, mo)			\
+	do {							\
+		switch (mo) {					\
+		case CMM_ACQUIRE:				\
+		case CMM_CONSUME:				\
+		case CMM_RELAXED:				\
+			break;					\
+		case CMM_RELEASE:				\
+		case CMM_ACQ_REL:				\
+		case CMM_SEQ_CST:				\
+		case CMM_SEQ_CST_FENCE:				\
+			cmm_smp_mb();				\
+			break;					\
+		default:					\
+			abort();				\
+		}						\
+								\
+		op(addr, v);					\
+								\
+		switch (mo) {					\
+		case CMM_CONSUME:				\
+			cmm_smp_read_barrier_depends();		\
+			break;					\
+		case CMM_ACQUIRE:				\
+		case CMM_ACQ_REL:				\
+		case CMM_SEQ_CST:				\
+		case CMM_SEQ_CST_FENCE:				\
+			cmm_smp_mb();				\
+			break;					\
+		case CMM_RELAXED:				\
+		case CMM_RELEASE:				\
+			break;					\
+		default:					\
+			abort();				\
+		}						\
+	} while (0)
+
+/*
+ * uatomic_store - store `v` into *addr under memory order `mo`.  Only
+ * CMM_RELAXED, CMM_RELEASE, CMM_SEQ_CST and CMM_SEQ_CST_FENCE are
+ * accepted; anything else aborts.  CMM_RELEASE emits a full barrier
+ * before the store; the SEQ_CST orders emit a full barrier both before
+ * and after it.
+ */
+#define uatomic_store(addr, v, mo)				\
+	do {							\
+		switch (mo) {					\
+		case CMM_RELAXED:				\
+			break;					\
+		case CMM_RELEASE:				\
+		case CMM_SEQ_CST:				\
+		case CMM_SEQ_CST_FENCE:				\
+			cmm_smp_mb();				\
+			break;					\
+		default:					\
+			abort();				\
+		}						\
+								\
+		uatomic_set(addr, v);				\
+								\
+		switch (mo) {					\
+		case CMM_RELAXED:				\
+		case CMM_RELEASE:				\
+			break;					\
+		case CMM_SEQ_CST:				\
+		case CMM_SEQ_CST_FENCE:				\
+			cmm_smp_mb();				\
+			break;					\
+		default:					\
+			abort();				\
+		}						\
+	} while (0)
+
+/*
+ * Memory-order-aware wrappers around the basic uatomic read-modify-write
+ * primitives.  Each forwards to uatomic_load_store_op, which brackets
+ * the operation with the barriers required by `mo`.  Increment and
+ * decrement are expressed as additions of 1 and -1.
+ */
+#define uatomic_and_mo(addr, v, mo)				\
+	uatomic_load_store_op(uatomic_and, addr, v, mo)
+
+#define uatomic_or_mo(addr, v, mo)				\
+	uatomic_load_store_op(uatomic_or, addr, v, mo)
+
+#define uatomic_add_mo(addr, v, mo)				\
+	uatomic_load_store_op(uatomic_add, addr, v, mo)
+
+#define uatomic_sub_mo(addr, v, mo)				\
+	uatomic_load_store_op(uatomic_sub, addr, v, mo)
+
+#define uatomic_inc_mo(addr, mo)				\
+	uatomic_load_store_op(uatomic_add, addr, 1, mo)
+
+#define uatomic_dec_mo(addr, mo)				\
+	uatomic_load_store_op(uatomic_add, addr, -1, mo)
+/*
+ * uatomic_cmpxchg_mo - atomic compare-and-exchange of *addr from `old`
+ * to `new`, returning the value previously held by *addr.  `mos` is the
+ * memory order applied when the exchange succeeds, `mof` the one
+ * applied when it fails.  An unknown memory order aborts.
+ *
+ * NOTE: we cannot simply write `switch (_value == (old) ? mos : mof)`
+ * after the exchange: that makes the compiler emit a -Wduplicated-cond
+ * warning, hence the two explicit branches below.
+ */
+#define uatomic_cmpxchg_mo(addr, old, new, mos, mof)		\
+	__extension__						\
+	({							\
+		switch (mos) {					\
+		case CMM_ACQUIRE:				\
+		case CMM_CONSUME:				\
+		case CMM_RELAXED:				\
+			break;					\
+		case CMM_RELEASE:				\
+		case CMM_ACQ_REL:				\
+		case CMM_SEQ_CST:				\
+		case CMM_SEQ_CST_FENCE:				\
+			cmm_smp_mb();				\
+			break;					\
+		default:					\
+			abort();				\
+		}						\
+								\
+		__typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
+							new);	\
+								\
+		if (_value == (old)) {				\
+			switch (mos) {				\
+			case CMM_CONSUME:			\
+				cmm_smp_read_barrier_depends();	\
+				break;				\
+			case CMM_ACQUIRE:			\
+			case CMM_ACQ_REL:			\
+			case CMM_SEQ_CST:			\
+			case CMM_SEQ_CST_FENCE:			\
+				cmm_smp_mb();			\
+				break;				\
+			case CMM_RELAXED:			\
+			case CMM_RELEASE:			\
+				break;				\
+			default:				\
+				abort();			\
+			}					\
+		} else {					\
+			switch (mof) {				\
+			case CMM_CONSUME:			\
+				cmm_smp_read_barrier_depends();	\
+				break;				\
+			case CMM_ACQUIRE:			\
+			case CMM_ACQ_REL:			\
+			case CMM_SEQ_CST:			\
+			case CMM_SEQ_CST_FENCE:			\
+				cmm_smp_mb();			\
+				break;				\
+			case CMM_RELAXED:			\
+			case CMM_RELEASE:			\
+				break;				\
+			default:				\
+				abort();			\
+			}					\
+		}						\
+		_value;						\
+	})
+
+/*
+ * uatomic_xchg_mo - atomic exchange returning the previous value of
+ * *addr, with the barriers for `mo` supplied by
+ * uatomic_load_store_return_op.
+ */
+#define uatomic_xchg_mo(addr, v, mo)				\
+	uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)
+
+/*
+ * uatomic_add_return_mo / uatomic_sub_return_mo - atomic add/subtract
+ * returning the resulting value, with the barriers for `mo` supplied by
+ * uatomic_load_store_return_op.  The `mo` argument must be forwarded:
+ * uatomic_load_store_return_op takes four arguments, and dropping `mo`
+ * both fails to expand and loses the requested ordering.
+ */
+#define uatomic_add_return_mo(addr, v, mo)			\
+	uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)
+
+#define uatomic_sub_return_mo(addr, v, mo)			\
+	uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)
+
+
#ifndef uatomic_read
#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
#endif
+/*
+ * uatomic_load - load *addr under memory order `mo`.  Accepts
+ * CMM_RELAXED, CMM_CONSUME, CMM_ACQUIRE, CMM_SEQ_CST and
+ * CMM_SEQ_CST_FENCE; anything else aborts.  The SEQ_CST orders emit a
+ * full barrier both before and after the load, CMM_ACQUIRE a full
+ * barrier after it, and CMM_CONSUME a data-dependency barrier after it.
+ */
+#define uatomic_load(addr, mo)					\
+	__extension__						\
+	({							\
+		switch (mo) {					\
+		case CMM_ACQUIRE:				\
+		case CMM_CONSUME:				\
+		case CMM_RELAXED:				\
+			break;					\
+		case CMM_SEQ_CST:				\
+		case CMM_SEQ_CST_FENCE:				\
+			cmm_smp_mb();				\
+			break;					\
+		default:					\
+			abort();				\
+		}						\
+								\
+		__typeof__(*(addr)) _rcu_value = uatomic_read(addr); \
+								\
+		switch (mo) {					\
+		case CMM_RELAXED:				\
+			break;					\
+		case CMM_CONSUME:				\
+			cmm_smp_read_barrier_depends();		\
+			break;					\
+		case CMM_ACQUIRE:				\
+		case CMM_SEQ_CST:				\
+		case CMM_SEQ_CST_FENCE:				\
+			cmm_smp_mb();				\
+			break;					\
+		default:					\
+			abort();				\
+		}						\
+								\
+		_rcu_value;					\
+	})
+
#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
-static inline __attribute__((always_inline, noreturn))
+#ifdef ILLEGAL_INSTR
+/*
+ * ILLEGAL_INSTR variant: traps via an inline illegal instruction.
+ * NOTE(review): not marked noreturn here — presumably because the
+ * compiler cannot verify that the asm statement never returns; confirm
+ * against the else-branch below, which is __noreturn__.
+ */
+static inline __attribute__((always_inline))
void _uatomic_link_error(void)
{
-#ifdef ILLEGAL_INSTR
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
+}
#else
+/* No ILLEGAL_INSTR available: trap through the compiler builtin. */
+static inline __attribute__((always_inline, __noreturn__))
+void _uatomic_link_error(void)
+{
	__builtin_trap();
-#endif
}
+#endif
#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
extern void _uatomic_link_error(void);