rculfhash: make cds_lfht_iter_get_node argument const
author    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Thu, 13 Jun 2024 18:47:33 +0000 (14:47 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Thu, 13 Jun 2024 18:47:33 +0000 (14:47 -0400)
cds_lfht_iter_get_node doesn't modify its argument. Hence, the argument can
be marked as `const`.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: I69f6460928e90faa0a44e6c795a0eccb3f418738
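
For context, a minimal sketch (not part of the commit) of what the const
qualifier enables: read-only helpers can now take the iterator by const
pointer. The helper name is hypothetical:

#include <urcu/rculfhash.h>

/* Hypothetical read-only helper: compiles cleanly now that
 * cds_lfht_iter_get_node() accepts a const iterator. */
static struct cds_lfht_node *peek(const struct cds_lfht_iter *iter)
{
        return cds_lfht_iter_get_node(iter);
}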

16 files changed:
extras/abi/dump_abi.sh
include/urcu/futex.h
include/urcu/lfstack.h
include/urcu/list.h
include/urcu/rculfhash.h
include/urcu/static/lfstack.h
include/urcu/static/wfcqueue.h
include/urcu/static/wfstack.h
include/urcu/uatomic/generic.h
include/urcu/uatomic/x86.h
include/urcu/wfcqueue.h
include/urcu/wfstack.h
src/compat-smp.h
src/lfstack.c
src/wfcqueue.c
src/wfstack.c

extras/abi/dump_abi.sh
index a7bd5fda4c9ffcca25b51b7cd32298c1c7ba804a..673b8391e16442d244076c79ff2ef20b91e88eb6 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # SPDX-FileCopyrightText: 2021 Michael Jeanson <mjeanson@efficios.com>
 #
include/urcu/futex.h
index 9d0a997473c025563bd9c557225e2029ecd6f831..f1181ee4a6011022af997b53b375addc93ba9137 100644 (file)
 #if (defined(__linux__) && defined(__NR_futex))
 
 /* For backwards compat */
-#define CONFIG_RCU_HAVE_FUTEX 1
+# define CONFIG_RCU_HAVE_FUTEX 1
 
-#include <unistd.h>
-#include <errno.h>
-#include <urcu/compiler.h>
-#include <urcu/arch.h>
+# include <unistd.h>
+# include <errno.h>
+# include <urcu/compiler.h>
+# include <urcu/arch.h>
+# include <urcu/assert.h>
 
 #elif defined(__FreeBSD__)
 
-#include <sys/types.h>
-#include <sys/umtx.h>
+# include <sys/types.h>
+# include <sys/umtx.h>
+
+#elif defined(__OpenBSD__)
+
+# include <sys/time.h>
+# include <sys/futex.h>
 
 #endif
 
 extern "C" {
 #endif
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
+#ifndef __OpenBSD__
+# define FUTEX_WAIT            0
+# define FUTEX_WAKE            1
+#endif
 
 /*
  * sys_futex compatibility header.
@@ -64,8 +72,7 @@ extern int compat_futex_async(int32_t *uaddr, int op, int32_t val,
 static inline int futex(int32_t *uaddr, int op, int32_t val,
                const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
 {
-       return syscall(__NR_futex, uaddr, op, val, timeout,
-                       uaddr2, val3);
+       return syscall(__NR_futex, uaddr, op, val, timeout, uaddr2, val3);
 }
 
 static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
@@ -107,9 +114,7 @@ static inline int futex_async(int32_t *uaddr, int op, int32_t val,
 #elif defined(__FreeBSD__)
 
 static inline int futex_async(int32_t *uaddr, int op, int32_t val,
-               const struct timespec *timeout,
-               int32_t *uaddr2 __attribute__((unused)),
-               int32_t val3 __attribute__((unused)))
+               const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
 {
        int umtx_op;
        void *umtx_uaddr = NULL, *umtx_uaddr2 = NULL;
@@ -118,6 +123,13 @@ static inline int futex_async(int32_t *uaddr, int op, int32_t val,
                ._clockid = CLOCK_MONOTONIC,
        };
 
+       /*
+        * Check if NULL or zero. Don't let users expect that they are
+        * taken into account.
+        */
+       urcu_posix_assert(!uaddr2);
+       urcu_posix_assert(!val3);
+
        switch (op) {
        case FUTEX_WAIT:
                /* On FreeBSD, a "u_int" is a 32-bit integer. */
@@ -146,6 +158,48 @@ static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
        return futex_async(uaddr, op, val, timeout, uaddr2, val3);
 }
 
+#elif defined(__OpenBSD__)
+
+static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
+               const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+       int ret;
+
+       /*
+        * Check that val3 is zero. Don't let users expect that it is
+        * taken into account.
+        */
+       urcu_posix_assert(!val3);
+
+       ret = futex((volatile uint32_t *) uaddr, op, val, timeout,
+               (volatile uint32_t *) uaddr2);
+       if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+               return compat_futex_noasync(uaddr, op, val, timeout,
+                               uaddr2, val3);
+       }
+       return ret;
+}
+
+static inline int futex_async(int32_t *uaddr, int op, int32_t val,
+               const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+       int ret;
+
+       /*
+        * Check that val3 is zero. Don't let users expect that it is
+        * taken into account.
+        */
+       urcu_posix_assert(!val3);
+
+       ret = futex((volatile uint32_t *) uaddr, op, val, timeout,
+               (volatile uint32_t *) uaddr2);
+       if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+               return compat_futex_async(uaddr, op, val, timeout,
+                               uaddr2, val3);
+       }
+       return ret;
+}
+
 #elif defined(__CYGWIN__)
 
 /*
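
As background for the wrappers above, a hedged sketch (not from the patch) of
how this compatibility API is typically used: a 32-bit word, a waiter that
sleeps while the word is 0, and a waker. The function names are hypothetical
and error handling is omitted; uaddr2/val3 are passed as NULL/0, which is
exactly what the new urcu_posix_assert() checks enforce:

#include <stdint.h>
#include <stddef.h>
#include <urcu/futex.h>
#include <urcu/system.h>

static int32_t futex_word;

static void wait_for_flag(void)         /* hypothetical waiter */
{
        while (CMM_LOAD_SHARED(futex_word) == 0) {
                /* Sleeps only while futex_word is still 0. */
                futex_noasync(&futex_word, FUTEX_WAIT, 0, NULL, NULL, 0);
        }
}

static void set_flag_and_wake(void)     /* hypothetical waker */
{
        CMM_STORE_SHARED(futex_word, 1);
        futex_noasync(&futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
}
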
include/urcu/lfstack.h
index 77af43bf18069df89619f85be6ac46178980de5c..2a3073d695b7ae479d2a848420b15c7b70be647e 100644 (file)
@@ -84,6 +84,11 @@ typedef union {
        struct __cds_lfs_stack *_s;
        struct cds_lfs_stack *s;
 } __attribute__((__transparent_union__)) cds_lfs_stack_ptr_t;
+
+typedef union {
+       const struct __cds_lfs_stack *_s;
+       const struct cds_lfs_stack *s;
+} __attribute__((__transparent_union__)) cds_lfs_stack_const_ptr_t;
 #if defined(__clang__)
 #pragma clang diagnostic pop
 #endif
@@ -141,7 +146,7 @@ extern void __cds_lfs_init(struct __cds_lfs_stack *s);
  *
  * No memory barrier is issued. No mutual exclusion is required.
  */
-extern bool cds_lfs_empty(cds_lfs_stack_ptr_t s);
+extern bool cds_lfs_empty(cds_lfs_stack_const_ptr_t s);
 
 /*
  * cds_lfs_push: push a node into the stack.
@@ -276,9 +281,25 @@ static inline cds_lfs_stack_ptr_t cds_lfs_stack_cast(struct cds_lfs_stack *s)
        return ret;
 }
 
+static inline cds_lfs_stack_const_ptr_t cds_lfs_stack_const_cast(const struct __cds_lfs_stack *s)
+{
+       cds_lfs_stack_const_ptr_t ret = {
+               ._s = s,
+       };
+       return ret;
+}
+
+static inline cds_lfs_stack_const_ptr_t cds_lfs_stack_const_cast(const struct cds_lfs_stack *s)
+{
+       cds_lfs_stack_const_ptr_t ret = {
+               .s = s,
+       };
+       return ret;
+}
+
 template<typename T> static inline bool cds_lfs_empty(T s)
 {
-       return cds_lfs_empty(cds_lfs_stack_cast(s));
+       return cds_lfs_empty(cds_lfs_stack_const_cast(s));
 }
 
 template<typename T> static inline bool cds_lfs_push(T s,
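
The new cds_lfs_stack_const_ptr_t mirrors the existing transparent-union
trick, so a single const-correct entry point serves both stack flavors in C,
while the cds_lfs_stack_const_cast overloads do the same job in C++. A brief
sketch (hypothetical helper, stacks assumed initialized by the caller):

#include <stdbool.h>
#include <urcu/lfstack.h>

static bool both_empty(const struct cds_lfs_stack *a,
                const struct __cds_lfs_stack *b)
{
        /* Either pointer type converts implicitly to
         * cds_lfs_stack_const_ptr_t. */
        return cds_lfs_empty(a) && cds_lfs_empty(b);
}
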
include/urcu/list.h
index 4bc88b3f22c505e0f83355d3919eff80fb3ac868..f2d77bb067d0c0ff69342b2aea374cf62706c981 100644 (file)
@@ -88,7 +88,7 @@ void cds_list_move(struct cds_list_head *elem, struct cds_list_head *head)
 
 /* Replace an old entry. */
 static inline
-void cds_list_replace(struct cds_list_head *old, struct cds_list_head *_new)
+void cds_list_replace(const struct cds_list_head *old, struct cds_list_head *_new)
 {
        _new->next = old->next;
        _new->prev = old->prev;
@@ -169,7 +169,7 @@ void cds_list_splice(struct cds_list_head *add, struct cds_list_head *head)
                 pos = (p), p = cds_list_entry((pos)->member.next, __typeof__(*(pos)), member))
 
 static inline
-int cds_list_empty(struct cds_list_head *head)
+int cds_list_empty(const struct cds_list_head *head)
 {
        return head == head->next;
 }
include/urcu/rculfhash.h
index e0f4b351fc9b027a32ecee2c1e8a958b3ce21b2f..69a251c29c5e594ed8d2ea38572b400693ec78a6 100644 (file)
@@ -81,7 +81,7 @@ struct cds_lfht_alloc {
 };
 
 static inline
-struct cds_lfht_node *cds_lfht_iter_get_node(struct cds_lfht_iter *iter)
+struct cds_lfht_node *cds_lfht_iter_get_node(const struct cds_lfht_iter *iter)
 {
        return iter->node;
 }
include/urcu/static/lfstack.h
index d7e70d4966d6bab1f5f040c634a025c84f7baa99..22233d8d22b3d3deacfef4abf502e28dd8bcc2e1 100644 (file)
@@ -87,7 +87,7 @@ void ___cds_lfs_init(struct __cds_lfs_stack *s)
 }
 
 static inline
-bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
+bool ___cds_lfs_empty_head(const struct cds_lfs_head *head)
 {
        return head == NULL;
 }
@@ -98,7 +98,7 @@ bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
  * No memory barrier is issued. No mutual exclusion is required.
  */
 static inline
-bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
+bool _cds_lfs_empty(cds_lfs_stack_const_ptr_t s)
 {
        return ___cds_lfs_empty_head(uatomic_load(&s._s->head, CMM_RELAXED));
 }
include/urcu/static/wfcqueue.h
index 26741ae81d241ed04f1917b68e7755847c90490b..2799c61bb38b3c9f53610af50fad0d516445bf5f 100644 (file)
@@ -133,10 +133,10 @@ static inline void ___cds_wfcq_init(struct __cds_wfcq_head *head,
  * make a queue appear empty if an enqueuer is preempted for a long time
  * between xchg() and setting the previous node's next pointer.
  */
-static inline bool _cds_wfcq_empty(cds_wfcq_head_ptr_t u_head,
-               struct cds_wfcq_tail *tail)
+static inline bool _cds_wfcq_empty(cds_wfcq_head_const_ptr_t u_head,
+               const struct cds_wfcq_tail *tail)
 {
-       struct __cds_wfcq_head *head = u_head._h;
+       const struct __cds_wfcq_head *head = u_head._h;
        /*
         * Queue is empty if no node is pointed by head->node.next nor
         * tail->p. Even though the tail->p check is sufficient to find
@@ -283,7 +283,7 @@ ___cds_wfcq_first(cds_wfcq_head_ptr_t u_head,
        struct __cds_wfcq_head *head = u_head._h;
        struct cds_wfcq_node *node;
 
-       if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail))
+       if (_cds_wfcq_empty(__cds_wfcq_head_const_cast(head), tail))
                return NULL;
        node = ___cds_wfcq_node_sync_next(&head->node, blocking);
 
@@ -399,7 +399,7 @@ ___cds_wfcq_dequeue_with_state(cds_wfcq_head_ptr_t u_head,
        if (state)
                *state = 0;
 
-       if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail)) {
+       if (_cds_wfcq_empty(__cds_wfcq_head_const_cast(head), tail)) {
                return NULL;
        }
 
@@ -530,7 +530,7 @@ ___cds_wfcq_splice(
         * Initial emptiness check to speed up cases where queue is
         * empty: only require loads to check if queue is empty.
         */
-       if (_cds_wfcq_empty(__cds_wfcq_head_cast(src_q_head), src_q_tail))
+       if (_cds_wfcq_empty(__cds_wfcq_head_const_cast(src_q_head), src_q_tail))
                return CDS_WFCQ_RET_SRC_EMPTY;
 
        for (;;) {
include/urcu/static/wfstack.h
index c46e97d9f25197e0e701a55ef24b0975bd1b2aac..97c5192a8f975c51cd000b296e2953526c3af865 100644 (file)
@@ -106,9 +106,9 @@ static inline bool ___cds_wfs_end(void *node)
  *
  * No memory barrier is issued. No mutual exclusion is required.
  */
-static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
+static inline bool _cds_wfs_empty(cds_wfs_stack_const_ptr_t u_stack)
 {
-       struct __cds_wfs_stack *s = u_stack._s;
+       const struct __cds_wfs_stack *s = u_stack._s;
 
        return ___cds_wfs_end(uatomic_load(&s->head, CMM_RELAXED));
 }
include/urcu/uatomic/generic.h
index 8f8c43785ba0c7609d827ab41a6d5d5882dca0ab..ed655bb8def13a5120990d049eb12199ee47376a 100644 (file)
@@ -15,7 +15,6 @@
  */
 
 #include <stdint.h>
-#include <stdlib.h>
 #include <urcu/compiler.h>
 #include <urcu/system.h>
 
@@ -27,125 +26,61 @@ extern "C" {
 #define uatomic_set(addr, v)   ((void) CMM_STORE_SHARED(*(addr), (v)))
 #endif
 
-#define uatomic_load_store_return_op(op, addr, v, mo)                  \
-       __extension__                                                   \
-       ({                                                              \
-                                                                       \
-               switch (mo) {                                           \
-               case CMM_ACQUIRE:                                       \
-               case CMM_CONSUME:                                       \
-               case CMM_RELAXED:                                       \
-                       break;                                          \
-               case CMM_RELEASE:                                       \
-               case CMM_ACQ_REL:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
-                                                                       \
-               __typeof__((*addr)) _value = op(addr, v);               \
-                                                                       \
-               switch (mo) {                                           \
-               case CMM_CONSUME:                                       \
-                       cmm_smp_read_barrier_depends();                 \
-                       break;                                          \
-               case CMM_ACQUIRE:                                       \
-               case CMM_ACQ_REL:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               case CMM_RELAXED:                                       \
-               case CMM_RELEASE:                                       \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
-               _value;                                                 \
+/*
+ * Can be defined for the architecture.
+ *
+ * What needs to be emitted _before_ the `operation' with memory ordering `mo'.
+ */
+#ifndef _cmm_compat_c11_smp_mb__before_mo
+# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb()
+#endif
+
+/*
+ * Can be defined for the architecture.
+ *
+ * What needs to be emitted _after_ the `operation' with memory ordering `mo'.
+ */
+#ifndef _cmm_compat_c11_smp_mb__after_mo
+# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb()
+#endif
+
+#define uatomic_load_store_return_op(op, addr, v, mo)          \
+       __extension__                                           \
+       ({                                                      \
+               _cmm_compat_c11_smp_mb__before_mo(op, mo);      \
+               __typeof__((*addr)) _value = op(addr, v);       \
+               _cmm_compat_c11_smp_mb__after_mo(op, mo);       \
+                                                               \
+               _value;                                         \
        })
 
-#define uatomic_load_store_op(op, addr, v, mo)                         \
-       do {                                                            \
-               switch (mo) {                                           \
-               case CMM_ACQUIRE:                                       \
-               case CMM_CONSUME:                                       \
-               case CMM_RELAXED:                                       \
-                       break;                                          \
-               case CMM_RELEASE:                                       \
-               case CMM_ACQ_REL:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
-                                                                       \
-               op(addr, v);                                            \
-                                                                       \
-               switch (mo) {                                           \
-               case CMM_CONSUME:                                       \
-                       cmm_smp_read_barrier_depends();                 \
-                       break;                                          \
-               case CMM_ACQUIRE:                                       \
-               case CMM_ACQ_REL:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               case CMM_RELAXED:                                       \
-               case CMM_RELEASE:                                       \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
+#define uatomic_load_store_op(op, addr, v, mo)                 \
+       do {                                                    \
+               _cmm_compat_c11_smp_mb__before_mo(op, mo);      \
+               op(addr, v);                                    \
+               _cmm_compat_c11_smp_mb__after_mo(op, mo);       \
        } while (0)
 
-#define uatomic_store(addr, v, mo)                     \
-       do {                                            \
-               switch (mo) {                           \
-               case CMM_RELAXED:                       \
-                       break;                          \
-               case CMM_RELEASE:                       \
-               case CMM_SEQ_CST:                       \
-               case CMM_SEQ_CST_FENCE:                 \
-                       cmm_smp_mb();                   \
-                       break;                          \
-               default:                                \
-                       abort();                        \
-               }                                       \
-                                                       \
-               uatomic_set(addr, v);                   \
-                                                       \
-               switch (mo) {                           \
-               case CMM_RELAXED:                       \
-               case CMM_RELEASE:                       \
-                       break;                          \
-               case CMM_SEQ_CST:                       \
-               case CMM_SEQ_CST_FENCE:                 \
-                       cmm_smp_mb();                   \
-                       break;                          \
-               default:                                \
-                       abort();                        \
-               }                                       \
+#define uatomic_store(addr, v, mo)                                     \
+       do {                                                            \
+               _cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo);     \
+               uatomic_set(addr, v);                                   \
+               _cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo);      \
        } while (0)
 
-#define uatomic_and_mo(addr, v, mo)                            \
+#define uatomic_and_mo(addr, v, mo)                    \
        uatomic_load_store_op(uatomic_and, addr, v, mo)
 
-#define uatomic_or_mo(addr, v, mo)                             \
+#define uatomic_or_mo(addr, v, mo)                     \
        uatomic_load_store_op(uatomic_or, addr, v, mo)
 
-#define uatomic_add_mo(addr, v, mo)                            \
+#define uatomic_add_mo(addr, v, mo)                    \
        uatomic_load_store_op(uatomic_add, addr, v, mo)
 
-#define uatomic_sub_mo(addr, v, mo)                            \
+#define uatomic_sub_mo(addr, v, mo)                    \
        uatomic_load_store_op(uatomic_sub, addr, v, mo)
 
-#define uatomic_inc_mo(addr, mo)                               \
+#define uatomic_inc_mo(addr, mo)                       \
        uatomic_load_store_op(uatomic_add, addr, 1, mo)
 
 #define uatomic_dec_mo(addr, mo)                               \
@@ -157,58 +92,14 @@ extern "C" {
 #define uatomic_cmpxchg_mo(addr, old, new, mos, mof)                   \
        __extension__                                                   \
        ({                                                              \
-               switch (mos) {                                          \
-               case CMM_ACQUIRE:                                       \
-               case CMM_CONSUME:                                       \
-               case CMM_RELAXED:                                       \
-                       break;                                          \
-               case CMM_RELEASE:                                       \
-               case CMM_ACQ_REL:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
-                                                                       \
+               _cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \
                __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
                                                        new);           \
                                                                        \
                if (_value == (old)) {                                  \
-                       switch (mos) {                                  \
-                       case CMM_CONSUME:                               \
-                               cmm_smp_read_barrier_depends();         \
-                               break;                                  \
-                       case CMM_ACQUIRE:                               \
-                       case CMM_ACQ_REL:                               \
-                       case CMM_SEQ_CST:                               \
-                       case CMM_SEQ_CST_FENCE:                         \
-                               cmm_smp_mb();                           \
-                               break;                                  \
-                       case CMM_RELAXED:                               \
-                       case CMM_RELEASE:                               \
-                               break;                                  \
-                       default:                                        \
-                               abort();                                \
-                       }                                               \
+                       _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \
                } else {                                                \
-                       switch (mof) {                                  \
-                       case CMM_CONSUME:                               \
-                               cmm_smp_read_barrier_depends();         \
-                               break;                                  \
-                       case CMM_ACQUIRE:                               \
-                       case CMM_ACQ_REL:                               \
-                       case CMM_SEQ_CST:                               \
-                       case CMM_SEQ_CST_FENCE:                         \
-                               cmm_smp_mb();                           \
-                               break;                                  \
-                       case CMM_RELAXED:                               \
-                       case CMM_RELEASE:                               \
-                               break;                                  \
-                       default:                                        \
-                               abort();                                \
-                       }                                               \
+                       _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \
                }                                                       \
                _value;                                                 \
        })
@@ -222,7 +113,6 @@ extern "C" {
 #define uatomic_sub_return_mo(addr, v, mo)                             \
        uatomic_load_store_return_op(uatomic_sub_return, addr, v)
 
-
 #ifndef uatomic_read
 #define uatomic_read(addr)     CMM_LOAD_SHARED(*(addr))
 #endif
@@ -230,35 +120,9 @@ extern "C" {
 #define uatomic_load(addr, mo)                                         \
        __extension__                                                   \
        ({                                                              \
-               switch (mo) {                                           \
-               case CMM_ACQUIRE:                                       \
-               case CMM_CONSUME:                                       \
-               case CMM_RELAXED:                                       \
-                       break;                                          \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
-                                                                       \
+               _cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo);    \
                __typeof__(*(addr)) _rcu_value = uatomic_read(addr);    \
-                                                                       \
-               switch (mo) {                                           \
-               case CMM_RELAXED:                                       \
-                       break;                                          \
-               case CMM_CONSUME:                                       \
-                       cmm_smp_read_barrier_depends();                 \
-                       break;                                          \
-               case CMM_ACQUIRE:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
+               _cmm_compat_c11_smp_mb__after_mo(uatomic_read, mo);     \
                                                                        \
                _rcu_value;                                             \
        })
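
The rewrite above replaces the per-call-site switch on the memory order with
a pair of hooks that an architecture header may override; the fallback emits
a full cmm_smp_mb() on both sides, at least as strong as any branch of the
old switch. A sketch of the resulting expansion (hypothetical function):

#include <urcu/uatomic.h>

static int x;

static void store_release_sketch(void)
{
        /*
         * With the fallback hooks this expands to roughly:
         *   cmm_smp_mb();        _cmm_compat_c11_smp_mb__before_mo()
         *   uatomic_set(&x, 1);
         *   cmm_smp_mb();        _cmm_compat_c11_smp_mb__after_mo()
         * Architectures defining the hooks (see the x86 diff below)
         * emit weaker, order-specific barriers instead.
         */
        uatomic_store(&x, 1, CMM_RELEASE);
}
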
include/urcu/uatomic/x86.h
index b5725e0f7fd5a5203b9a2775a99731bcbc097cc2..616eee9be3ca60b4be62c8a10cadb0aff2d1ea8d 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _URCU_ARCH_UATOMIC_X86_H
 #define _URCU_ARCH_UATOMIC_X86_H
 
+#include <stdlib.h>            /* For abort(3). */
+
 /*
  * Code inspired from libuatomic_ops-1.2, inherited in part from the
  * Boehm-Demers-Weiser conservative garbage collector.
@@ -630,6 +632,474 @@ extern unsigned long _compat_uatomic_add_return(void *addr,
 #define cmm_smp_mb__before_uatomic_dec()       cmm_barrier()
 #define cmm_smp_mb__after_uatomic_dec()                cmm_barrier()
 
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
+{
+       /*
+        * A SMP barrier is not necessary for CMM_SEQ_CST because only a
+        * previous store can be reordered with the load.  However, emitting the
+        * memory barrier after the store is sufficient to prevent reordering
+        * between the two.  This follows the toolchain's decision of emitting
+        * the memory fence on the stores instead of the loads.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
+{
+       /*
+        * A SMP barrier is not necessary for CMM_SEQ_CST because following
+        * loads and stores cannot be reordered with the load.
+        *
+        * A SMP barrier is however necessary for CMM_SEQ_CST_FENCE to respect
+        * the memory model, since the underlying operation does not have a lock
+        * prefix.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_SEQ_CST_FENCE:
+               cmm_smp_mb();
+               break;
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_SEQ_CST:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo)
+{
+       /*
+        * A SMP barrier is not necessary for CMM_SEQ_CST because the store can
+        * only be reordered with later loads.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo)
+{
+       /*
+        * A SMP barrier is necessary for CMM_SEQ_CST because the store can be
+        * reordered with later loads.  Since no memory barrier is being emitted
+        * before loads, one has to be emitted after the store.  This follows the
+        * toolchain's decision of emitting the memory fence on the stores instead
+        * of the loads.
+        *
+        * A SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the
+        * memory model, since the underlying store does not have a lock prefix.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               cmm_smp_mb();
+               break;
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_RELEASE:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_xchg has implicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_xchg has implicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_cmpxchg has implicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_cmpxchg has implicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_and has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_and has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_or has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_or has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_inc has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_inc has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_dec has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_dec has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+#define _cmm_compat_c11_smp_mb__before_mo(operation, mo)                       \
+       do {                                                    \
+               _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo);       \
+       } while (0)
+
+#define _cmm_compat_c11_smp_mb__after_mo(operation, mo)                        \
+       do {                                                    \
+               _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo);        \
+       } while (0)
+
+
 #ifdef __cplusplus
 }
 #endif
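
The net effect on x86, sketched below with a hypothetical function:
operations carrying a lock prefix need no extra fence for any valid order,
while plain load/store operations keep compiler barriers and emit a real
fence only where the mov alone is insufficient:

#include <urcu/uatomic.h>

static int flag;

static void x86_barrier_sketch(void)
{
        /* lock cmpxchg already serializes: both hooks reduce to a
         * switch that merely validates the memory order. */
        (void) uatomic_cmpxchg_mo(&flag, 0, 1, CMM_SEQ_CST, CMM_RELAXED);

        /* A plain mov store keeps compiler barriers on both sides and
         * pays a real cmm_smp_mb() after the store only for
         * CMM_SEQ_CST / CMM_SEQ_CST_FENCE. */
        uatomic_store(&flag, 0, CMM_SEQ_CST);
}
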
include/urcu/wfcqueue.h
index bba5c558d96cef1015c2a3197c5fe43797356e9d..5a4add454f59df3e04f2f098e82d9cdc06955853 100644 (file)
@@ -78,6 +78,11 @@ typedef union {
        struct __cds_wfcq_head *_h;
        struct cds_wfcq_head *h;
 } __attribute__((__transparent_union__)) cds_wfcq_head_ptr_t;
+
+typedef union {
+       const struct __cds_wfcq_head *_h;
+       const struct cds_wfcq_head *h;
+} __attribute__((__transparent_union__)) cds_wfcq_head_const_ptr_t;
 #if defined(__clang__)
 #pragma clang diagnostic pop
 #endif
@@ -100,6 +105,25 @@ static inline struct cds_wfcq_head *cds_wfcq_head_cast(struct cds_wfcq_head *hea
 {
        return head;
 }
+
+/*
+ * This static inline is only present for compatibility with C++. It is
+ * effect-less in C.
+ */
+static inline const struct __cds_wfcq_head *__cds_wfcq_head_const_cast(const struct __cds_wfcq_head *head)
+{
+       return head;
+}
+
+/*
+ * This static inline is only present for compatibility with C++. It is
+ * effect-less in C.
+ */
+static inline const struct cds_wfcq_head *cds_wfcq_head_const_cast(const struct cds_wfcq_head *head)
+{
+       return head;
+}
+
 #else /* #ifndef __cplusplus */
 
 /*
@@ -121,6 +145,27 @@ static inline cds_wfcq_head_ptr_t cds_wfcq_head_cast(struct cds_wfcq_head *head)
        cds_wfcq_head_ptr_t ret = { .h = head };
        return ret;
 }
+
+/*
+ * This static inline is used internally in the static inline
+ * implementation of the API.
+ */
+static inline cds_wfcq_head_const_ptr_t __cds_wfcq_head_const_cast(const struct __cds_wfcq_head *head)
+{
+       cds_wfcq_head_const_ptr_t ret = { ._h = head };
+       return ret;
+}
+
+/*
+ * This static inline is used internally in the static inline
+ * implementation of the API.
+ */
+static inline cds_wfcq_head_const_ptr_t cds_wfcq_head_const_cast(const struct cds_wfcq_head *head)
+{
+       cds_wfcq_head_const_ptr_t ret = { .h = head };
+       return ret;
+}
+
 #endif /* #else #ifndef __cplusplus */
 
 struct cds_wfcq_tail {
@@ -238,8 +283,8 @@ extern void __cds_wfcq_init(struct __cds_wfcq_head *head,
  *
  * No memory barrier is issued. No mutual exclusion is required.
  */
-extern bool cds_wfcq_empty(cds_wfcq_head_ptr_t head,
-               struct cds_wfcq_tail *tail);
+extern bool cds_wfcq_empty(cds_wfcq_head_const_ptr_t head,
+               const struct cds_wfcq_tail *tail);
 
 /*
  * cds_wfcq_dequeue_lock: take the dequeue mutual exclusion lock.
@@ -500,10 +545,22 @@ static inline cds_wfcq_head_ptr_t cds_wfcq_head_cast_cpp(struct cds_wfcq_head *h
        return ret;
 }
 
+static inline cds_wfcq_head_const_ptr_t cds_wfcq_head_const_cast_cpp(const struct __cds_wfcq_head *head)
+{
+       cds_wfcq_head_const_ptr_t ret = { ._h = head };
+       return ret;
+}
+
+static inline cds_wfcq_head_const_ptr_t cds_wfcq_head_const_cast_cpp(const struct cds_wfcq_head *head)
+{
+       cds_wfcq_head_const_ptr_t ret = { .h = head };
+       return ret;
+}
+
 template<typename T> static inline bool cds_wfcq_empty(T head,
-               struct cds_wfcq_tail *tail)
+               const struct cds_wfcq_tail *tail)
 {
-       return cds_wfcq_empty(cds_wfcq_head_cast_cpp(head), tail);
+       return cds_wfcq_empty(cds_wfcq_head_const_cast_cpp(head), tail);
 }
 
 template<typename T> static inline bool cds_wfcq_enqueue(T head,
include/urcu/wfstack.h
index 38e5b6b2dcb3099dec87f8a6b3fb4f5750e93fb1..66d415027ab33577b19cee609889558cada6b9fa 100644 (file)
@@ -96,6 +96,11 @@ typedef union {
        struct __cds_wfs_stack *_s;
        struct cds_wfs_stack *s;
 } __attribute__((__transparent_union__)) cds_wfs_stack_ptr_t;
+
+typedef union {
+       const struct __cds_wfs_stack *_s;
+       const struct cds_wfs_stack *s;
+} __attribute__((__transparent_union__)) cds_wfs_stack_const_ptr_t;
 #if defined(__clang__)
 #pragma clang diagnostic pop
 #endif
@@ -167,7 +172,7 @@ extern void __cds_wfs_init(struct __cds_wfs_stack *s);
  *
  * No memory barrier is issued. No mutual exclusion is required.
  */
-extern bool cds_wfs_empty(cds_wfs_stack_ptr_t u_stack);
+extern bool cds_wfs_empty(cds_wfs_stack_const_ptr_t u_stack);
 
 /*
  * cds_wfs_push: push a node into the stack.
@@ -372,9 +377,25 @@ static inline cds_wfs_stack_ptr_t cds_wfs_stack_cast(struct cds_wfs_stack *s)
        return ret;
 }
 
+static inline cds_wfs_stack_const_ptr_t cds_wfs_stack_const_cast(const struct __cds_wfs_stack *s)
+{
+       cds_wfs_stack_const_ptr_t ret = {
+               ._s = s,
+       };
+       return ret;
+}
+
+static inline cds_wfs_stack_const_ptr_t cds_wfs_stack_const_cast(const struct cds_wfs_stack *s)
+{
+       cds_wfs_stack_const_ptr_t ret = {
+               .s = s,
+       };
+       return ret;
+}
+
 template<typename T> static inline bool cds_wfs_empty(T s)
 {
-       return cds_wfs_empty(cds_wfs_stack_cast(s));
+       return cds_wfs_empty(cds_wfs_stack_const_cast(s));
 }
 
 template<typename T> static inline int cds_wfs_push(T s, struct cds_wfs_node *node)
src/compat-smp.h
index 31fa979a7040736999823a49ca1f01b7a4d92c48..5da8d6a03d7e7fd9b4b65e9617533c9b039bb436 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * SPDX-License-Identifier: LGPL-2.1-only
+ * SPDX-License-Identifier: MIT
  *
  * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
@@ -164,7 +164,7 @@ static inline int get_cpu_mask_from_sysfs(char *buf, size_t max_bytes, const cha
 
                total_bytes_read += bytes_read;
                assert(total_bytes_read <= max_bytes);
-       } while (max_bytes > total_bytes_read && bytes_read > 0);
+       } while (max_bytes > total_bytes_read && bytes_read != 0);
 
        /*
         * Make sure the mask read is a null terminated string.
src/lfstack.c
index 5a2c80f71f347a7994e40310098f13430715dc5c..ca3de8595d7bfd8ee9121e621f8618449ec88ef5 100644 (file)
@@ -36,7 +36,7 @@ void __cds_lfs_init(struct __cds_lfs_stack *s)
        ___cds_lfs_init(s);
 }
 
-bool cds_lfs_empty(cds_lfs_stack_ptr_t s)
+bool cds_lfs_empty(cds_lfs_stack_const_ptr_t s)
 {
        return _cds_lfs_empty(s);
 }
src/wfcqueue.c
index ff05510bd29d0f87aba4632bfcf92b8c34c43cc9..294b2662ba023115cc90c7fe921e93b6a6994be4 100644 (file)
@@ -38,8 +38,8 @@ void __cds_wfcq_init(struct __cds_wfcq_head *head,
        ___cds_wfcq_init(head, tail);
 }
 
-bool cds_wfcq_empty(cds_wfcq_head_ptr_t head,
-               struct cds_wfcq_tail *tail)
+bool cds_wfcq_empty(cds_wfcq_head_const_ptr_t head,
+               const struct cds_wfcq_tail *tail)
 
 {
        return _cds_wfcq_empty(head, tail);
src/wfstack.c
index 8fddaecf32a9c1d2e1f3326947f0f7e5c71ed2ef..6308a94b9876fa0ac0c99ed41a288471dbfa7fc9 100644 (file)
@@ -34,7 +34,7 @@ void __cds_wfs_init(struct __cds_wfs_stack *s)
        ___cds_wfs_init(s);
 }
 
-bool cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
+bool cds_wfs_empty(cds_wfs_stack_const_ptr_t u_stack)
 {
        return _cds_wfs_empty(u_stack);
 }