fix: handle EINTR correctly in get_cpu_mask_from_sysfs
[urcu.git] / include / urcu / uatomic / generic.h
index e31a19be7b269fbacb090ac349d4df45b433ac7e..ed655bb8def13a5120990d049eb12199ee47376a 100644 (file)
@@ -1,22 +1,15 @@
+// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation.  All rights reserved.
+// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics.  All rights reserved.
+// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
+// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+// SPDX-FileCopyrightText: 2010 Paolo Bonzini
+//
+// SPDX-License-Identifier: LicenseRef-Boehm-GC
+
 #ifndef _URCU_UATOMIC_GENERIC_H
 #define _URCU_UATOMIC_GENERIC_H
 
 /*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- * Copyright (c) 2010      Paolo Bonzini
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
  * Code inspired from libuatomic_ops-1.2, inherited in part from the
  * Boehm-Demers-Weiser conservative garbage collector.
  */
@@ -33,10 +26,107 @@ extern "C" {
 #define uatomic_set(addr, v)   ((void) CMM_STORE_SHARED(*(addr), (v)))
 #endif
 
+/*
+ * Can be defined for the architecture.
+ *
+ * What needs to be emitted _before_ the `operation' with memory ordering `mo'.
+ */
+#ifndef _cmm_compat_c11_smp_mb__before_mo
+# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb()
+#endif
+
+/*
+ * Can be defined for the architecture.
+ *
+ * What needs to be emitted _after_ the `operation' with memory ordering `mo'.
+ */
+#ifndef _cmm_compat_c11_smp_mb__after_mo
+# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb()
+#endif
+
+#define uatomic_load_store_return_op(op, addr, v, mo)          \
+       __extension__                                           \
+       ({                                                      \
+               _cmm_compat_c11_smp_mb__before_mo(op, mo);      \
+               __typeof__(*(addr)) _value = op(addr, v);       \
+               _cmm_compat_c11_smp_mb__after_mo(op, mo);       \
+                                                               \
+               _value;                                         \
+       })
+
+#define uatomic_load_store_op(op, addr, v, mo)                 \
+       do {                                                    \
+               _cmm_compat_c11_smp_mb__before_mo(op, mo);      \
+               op(addr, v);                                    \
+               _cmm_compat_c11_smp_mb__after_mo(op, mo);       \
+       } while (0)
+
+#define uatomic_store(addr, v, mo)                                     \
+       do {                                                            \
+               _cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo);     \
+               uatomic_set(addr, v);                                   \
+               _cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo);      \
+       } while (0)
+
+#define uatomic_and_mo(addr, v, mo)                    \
+       uatomic_load_store_op(uatomic_and, addr, v, mo)
+
+#define uatomic_or_mo(addr, v, mo)                     \
+       uatomic_load_store_op(uatomic_or, addr, v, mo)
+
+#define uatomic_add_mo(addr, v, mo)                    \
+       uatomic_load_store_op(uatomic_add, addr, v, mo)
+
+#define uatomic_sub_mo(addr, v, mo)                    \
+       uatomic_load_store_op(uatomic_sub, addr, v, mo)
+
+#define uatomic_inc_mo(addr, mo)                       \
+       uatomic_load_store_op(uatomic_add, addr, 1, mo)
+
+#define uatomic_dec_mo(addr, mo)                               \
+       uatomic_load_store_op(uatomic_add, addr, -1, mo)
+
+/*
+ * NOTE: We cannot simply do switch (_value == (old) ? mos : mof), otherwise
+ * the compiler emits a -Wduplicated-cond warning.
+ */
+#define uatomic_cmpxchg_mo(addr, old, new, mos, mof)                   \
+       __extension__                                                   \
+       ({                                                              \
+               _cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \
+               __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
+                                                       new);           \
+                                                                       \
+               if (_value == (old)) {                                  \
+                       _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \
+               } else {                                                \
+                       _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \
+               }                                                       \
+               _value;                                                 \
+       })
+
+#define uatomic_xchg_mo(addr, v, mo)                           \
+       uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)
+
+#define uatomic_add_return_mo(addr, v, mo)                             \
+       uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)
+
+#define uatomic_sub_return_mo(addr, v, mo)                             \
+       uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)
+
 #ifndef uatomic_read
 #define uatomic_read(addr)     CMM_LOAD_SHARED(*(addr))
 #endif
 
+#define uatomic_load(addr, mo)                                         \
+       __extension__                                                   \
+       ({                                                              \
+               _cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo);    \
+               __typeof__(*(addr)) _rcu_value = uatomic_read(addr);    \
+               _cmm_compat_c11_smp_mb__after_mo(uatomic_read, mo);     \
+                                                                       \
+               _rcu_value;                                             \
+       })
+
 #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR
 #ifdef ILLEGAL_INSTR
 static inline __attribute__((always_inline))
This page took 0.045843 seconds and 4 git commands to generate.