-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-FileCopyrightText: 2021 Michael Jeanson <mjeanson@efficios.com>
#
#if (defined(__linux__) && defined(__NR_futex))
/* For backwards compat */
-#define CONFIG_RCU_HAVE_FUTEX 1
+# define CONFIG_RCU_HAVE_FUTEX 1
-#include <unistd.h>
-#include <errno.h>
-#include <urcu/compiler.h>
-#include <urcu/arch.h>
+# include <unistd.h>
+# include <errno.h>
+# include <urcu/compiler.h>
+# include <urcu/arch.h>
+# include <urcu/assert.h>
#elif defined(__FreeBSD__)
-#include <sys/types.h>
-#include <sys/umtx.h>
+# include <sys/types.h>
+# include <sys/umtx.h>
+
+#elif defined(__OpenBSD__)
+
+# include <sys/time.h>
+# include <sys/futex.h>
#endif
extern "C" {
#endif
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
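/*
 * OpenBSD's <sys/futex.h> (pulled in by the OpenBSD branch above) already
 * provides FUTEX_WAIT/FUTEX_WAKE, so the compat values are only supplied
 * on the other platforms.
 */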
+#ifndef __OpenBSD__
+# define FUTEX_WAIT 0
+# define FUTEX_WAKE 1
+#endif
/*
* sys_futex compatibility header.
static inline int futex(int32_t *uaddr, int op, int32_t val,
const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
{
- return syscall(__NR_futex, uaddr, op, val, timeout,
- uaddr2, val3);
+ return syscall(__NR_futex, uaddr, op, val, timeout, uaddr2, val3);
}
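/*
 * Minimal wait/wake sketch using the wrapper above (hypothetical caller;
 * assumes a shared `int32_t state' initialized to 0 and the
 * CMM_LOAD_SHARED()/CMM_STORE_SHARED() helpers from <urcu/system.h>):
 *
 *	// Waiter: block while the kernel still observes state == 0.
 *	while (CMM_LOAD_SHARED(state) == 0)
 *		(void) futex(&state, FUTEX_WAIT, 0, NULL, NULL, 0);
 *
 *	// Waker: publish the new value, then wake one waiter.
 *	CMM_STORE_SHARED(state, 1);
 *	(void) futex(&state, FUTEX_WAKE, 1, NULL, NULL, 0);
 */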
static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
#elif defined(__FreeBSD__)
static inline int futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout,
- int32_t *uaddr2 __attribute__((unused)),
- int32_t val3 __attribute__((unused)))
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
{
int umtx_op;
void *umtx_uaddr = NULL, *umtx_uaddr2 = NULL;
._clockid = CLOCK_MONOTONIC,
};
+ /*
+	 * uaddr2 and val3 are not taken into account here: assert that they
+	 * are NULL and zero so callers do not expect otherwise.
+ */
+ urcu_posix_assert(!uaddr2);
+ urcu_posix_assert(!val3);
+
switch (op) {
case FUTEX_WAIT:
/* On FreeBSD, a "u_int" is a 32-bit integer. */
return futex_async(uaddr, op, val, timeout, uaddr2, val3);
}
+#elif defined(__OpenBSD__)
+
+static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int ret;
+
+ /*
+ * Check that val3 is zero. Don't let users expect that it is
+ * taken into account.
+ */
+ urcu_posix_assert(!val3);
+
+ ret = futex((volatile uint32_t *) uaddr, op, val, timeout,
+ (volatile uint32_t *) uaddr2);
+ if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+ return compat_futex_noasync(uaddr, op, val, timeout,
+ uaddr2, val3);
+ }
+ return ret;
+}
+
+static inline int futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int ret;
+
+ /*
+ * Check that val3 is zero. Don't let users expect that it is
+ * taken into account.
+ */
+ urcu_posix_assert(!val3);
+
+ ret = futex((volatile uint32_t *) uaddr, op, val, timeout,
+ (volatile uint32_t *) uaddr2);
+ if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+ return compat_futex_async(uaddr, op, val, timeout,
+ uaddr2, val3);
+ }
+ return ret;
+}
+
#elif defined(__CYGWIN__)
/*
struct __cds_lfs_stack *_s;
struct cds_lfs_stack *s;
} __attribute__((__transparent_union__)) cds_lfs_stack_ptr_t;
+
+typedef union {
+ const struct __cds_lfs_stack *_s;
+ const struct cds_lfs_stack *s;
+} __attribute__((__transparent_union__)) cds_lfs_stack_const_ptr_t;
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
*
* No memory barrier is issued. No mutual exclusion is required.
*/
-extern bool cds_lfs_empty(cds_lfs_stack_ptr_t s);
+extern bool cds_lfs_empty(cds_lfs_stack_const_ptr_t s);
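/*
 * A const stack can now be queried directly, e.g. through a hypothetical
 * read-only helper:
 *
 *	static bool stack_is_empty(const struct cds_lfs_stack *s)
 *	{
 *		return cds_lfs_empty(s);
 *	}
 *
 * This works in C through the const transparent union above and in C++
 * through the cds_lfs_stack_const_cast() overloads added further down.
 */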
/*
* cds_lfs_push: push a node into the stack.
return ret;
}
+static inline cds_lfs_stack_const_ptr_t cds_lfs_stack_const_cast(const struct __cds_lfs_stack *s)
+{
+ cds_lfs_stack_const_ptr_t ret = {
+ ._s = s,
+ };
+ return ret;
+}
+
+static inline cds_lfs_stack_const_ptr_t cds_lfs_stack_const_cast(const struct cds_lfs_stack *s)
+{
+ cds_lfs_stack_const_ptr_t ret = {
+ .s = s,
+ };
+ return ret;
+}
+
template<typename T> static inline bool cds_lfs_empty(T s)
{
- return cds_lfs_empty(cds_lfs_stack_cast(s));
+ return cds_lfs_empty(cds_lfs_stack_const_cast(s));
}
template<typename T> static inline bool cds_lfs_push(T s,
/* Replace an old entry. */
static inline
-void cds_list_replace(struct cds_list_head *old, struct cds_list_head *_new)
+void cds_list_replace(const struct cds_list_head *old, struct cds_list_head *_new)
{
_new->next = old->next;
_new->prev = old->prev;
pos = (p), p = cds_list_entry((pos)->member.next, __typeof__(*(pos)), member))
static inline
-int cds_list_empty(struct cds_list_head *head)
+int cds_list_empty(const struct cds_list_head *head)
{
return head == head->next;
}
};
static inline
-struct cds_lfht_node *cds_lfht_iter_get_node(struct cds_lfht_iter *iter)
+struct cds_lfht_node *cds_lfht_iter_get_node(const struct cds_lfht_iter *iter)
{
return iter->node;
}
}
static inline
-bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
+bool ___cds_lfs_empty_head(const struct cds_lfs_head *head)
{
return head == NULL;
}
* No memory barrier is issued. No mutual exclusion is required.
*/
static inline
-bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
+bool _cds_lfs_empty(cds_lfs_stack_const_ptr_t s)
{
return ___cds_lfs_empty_head(uatomic_load(&s._s->head, CMM_RELAXED));
}
* make a queue appear empty if an enqueuer is preempted for a long time
* between xchg() and setting the previous node's next pointer.
*/
-static inline bool _cds_wfcq_empty(cds_wfcq_head_ptr_t u_head,
- struct cds_wfcq_tail *tail)
+static inline bool _cds_wfcq_empty(cds_wfcq_head_const_ptr_t u_head,
+ const struct cds_wfcq_tail *tail)
{
- struct __cds_wfcq_head *head = u_head._h;
+ const struct __cds_wfcq_head *head = u_head._h;
/*
* Queue is empty if no node is pointed by head->node.next nor
* tail->p. Even though the tail->p check is sufficient to find
struct __cds_wfcq_head *head = u_head._h;
struct cds_wfcq_node *node;
- if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail))
+ if (_cds_wfcq_empty(__cds_wfcq_head_const_cast(head), tail))
return NULL;
node = ___cds_wfcq_node_sync_next(&head->node, blocking);
if (state)
*state = 0;
- if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail)) {
+ if (_cds_wfcq_empty(__cds_wfcq_head_const_cast(head), tail)) {
return NULL;
}
* Initial emptiness check to speed up cases where queue is
* empty: only require loads to check if queue is empty.
*/
- if (_cds_wfcq_empty(__cds_wfcq_head_cast(src_q_head), src_q_tail))
+ if (_cds_wfcq_empty(__cds_wfcq_head_const_cast(src_q_head), src_q_tail))
return CDS_WFCQ_RET_SRC_EMPTY;
for (;;) {
*
* No memory barrier is issued. No mutual exclusion is required.
*/
-static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
+static inline bool _cds_wfs_empty(cds_wfs_stack_const_ptr_t u_stack)
{
- struct __cds_wfs_stack *s = u_stack._s;
+ const struct __cds_wfs_stack *s = u_stack._s;
return ___cds_wfs_end(uatomic_load(&s->head, CMM_RELAXED));
}
*/
#include <stdint.h>
-#include <stdlib.h>
#include <urcu/compiler.h>
#include <urcu/system.h>
#define uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
#endif
-#define uatomic_load_store_return_op(op, addr, v, mo) \
- __extension__ \
- ({ \
- \
- switch (mo) { \
- case CMM_ACQUIRE: \
- case CMM_CONSUME: \
- case CMM_RELAXED: \
- break; \
- case CMM_RELEASE: \
- case CMM_ACQ_REL: \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- default: \
- abort(); \
- } \
- \
- __typeof__((*addr)) _value = op(addr, v); \
- \
- switch (mo) { \
- case CMM_CONSUME: \
- cmm_smp_read_barrier_depends(); \
- break; \
- case CMM_ACQUIRE: \
- case CMM_ACQ_REL: \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- case CMM_RELAXED: \
- case CMM_RELEASE: \
- break; \
- default: \
- abort(); \
- } \
- _value; \
+/*
+ * Can be defined for the architecture.
+ *
+ * What needs to be emitted _before_ the `operation' with memory ordering `mo'.
+ */
+#ifndef _cmm_compat_c11_smp_mb__before_mo
+# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb()
+#endif
+
+/*
+ * Can be defined for the architecture.
+ *
+ * What needs to be emitted _after_ the `operation' with memory ordering `mo'.
+ */
+#ifndef _cmm_compat_c11_smp_mb__after_mo
+# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb()
+#endif
+
+#define uatomic_load_store_return_op(op, addr, v, mo) \
+ __extension__ \
+ ({ \
+ _cmm_compat_c11_smp_mb__before_mo(op, mo); \
+ __typeof__((*addr)) _value = op(addr, v); \
+ _cmm_compat_c11_smp_mb__after_mo(op, mo); \
+ \
+ _value; \
})
-#define uatomic_load_store_op(op, addr, v, mo) \
- do { \
- switch (mo) { \
- case CMM_ACQUIRE: \
- case CMM_CONSUME: \
- case CMM_RELAXED: \
- break; \
- case CMM_RELEASE: \
- case CMM_ACQ_REL: \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- default: \
- abort(); \
- } \
- \
- op(addr, v); \
- \
- switch (mo) { \
- case CMM_CONSUME: \
- cmm_smp_read_barrier_depends(); \
- break; \
- case CMM_ACQUIRE: \
- case CMM_ACQ_REL: \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- case CMM_RELAXED: \
- case CMM_RELEASE: \
- break; \
- default: \
- abort(); \
- } \
+#define uatomic_load_store_op(op, addr, v, mo) \
+ do { \
+ _cmm_compat_c11_smp_mb__before_mo(op, mo); \
+ op(addr, v); \
+ _cmm_compat_c11_smp_mb__after_mo(op, mo); \
} while (0)
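/*
 * With the default hooks above, a wrapped operation is simply bracketed by
 * full barriers regardless of `mo'; e.g.
 *
 *	uatomic_load_store_op(uatomic_add, &x, 1, CMM_RELEASE)
 *
 * expands to roughly
 *
 *	cmm_smp_mb(); uatomic_add(&x, 1); cmm_smp_mb();
 *
 * An architecture can define the two hooks itself (as the x86 header does)
 * to emit only what each operation and memory order actually require.
 */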
-#define uatomic_store(addr, v, mo) \
- do { \
- switch (mo) { \
- case CMM_RELAXED: \
- break; \
- case CMM_RELEASE: \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- default: \
- abort(); \
- } \
- \
- uatomic_set(addr, v); \
- \
- switch (mo) { \
- case CMM_RELAXED: \
- case CMM_RELEASE: \
- break; \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- default: \
- abort(); \
- } \
+#define uatomic_store(addr, v, mo) \
+ do { \
+ _cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo); \
+ uatomic_set(addr, v); \
+ _cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo); \
} while (0)
-#define uatomic_and_mo(addr, v, mo) \
+#define uatomic_and_mo(addr, v, mo) \
uatomic_load_store_op(uatomic_and, addr, v, mo)
-#define uatomic_or_mo(addr, v, mo) \
+#define uatomic_or_mo(addr, v, mo) \
uatomic_load_store_op(uatomic_or, addr, v, mo)
-#define uatomic_add_mo(addr, v, mo) \
+#define uatomic_add_mo(addr, v, mo) \
uatomic_load_store_op(uatomic_add, addr, v, mo)
-#define uatomic_sub_mo(addr, v, mo) \
+#define uatomic_sub_mo(addr, v, mo) \
uatomic_load_store_op(uatomic_sub, addr, v, mo)
-#define uatomic_inc_mo(addr, mo) \
+#define uatomic_inc_mo(addr, mo) \
uatomic_load_store_op(uatomic_add, addr, 1, mo)
#define uatomic_dec_mo(addr, mo) \
#define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \
__extension__ \
({ \
- switch (mos) { \
- case CMM_ACQUIRE: \
- case CMM_CONSUME: \
- case CMM_RELAXED: \
- break; \
- case CMM_RELEASE: \
- case CMM_ACQ_REL: \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- default: \
- abort(); \
- } \
- \
+ _cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \
__typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
new); \
\
if (_value == (old)) { \
- switch (mos) { \
- case CMM_CONSUME: \
- cmm_smp_read_barrier_depends(); \
- break; \
- case CMM_ACQUIRE: \
- case CMM_ACQ_REL: \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- case CMM_RELAXED: \
- case CMM_RELEASE: \
- break; \
- default: \
- abort(); \
- } \
+ _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \
} else { \
- switch (mof) { \
- case CMM_CONSUME: \
- cmm_smp_read_barrier_depends(); \
- break; \
- case CMM_ACQUIRE: \
- case CMM_ACQ_REL: \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- case CMM_RELAXED: \
- case CMM_RELEASE: \
- break; \
- default: \
- abort(); \
- } \
+ _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \
} \
_value; \
})
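/*
 * The separate `mos'/`mof' arguments give the success and failure orderings,
 * mirroring C11 compare-and-exchange; e.g. a try-lock sketch (hypothetical
 * `lock' variable, 0 = unlocked):
 *
 *	if (uatomic_cmpxchg_mo(&lock, 0, 1, CMM_ACQUIRE, CMM_RELAXED) == 0) {
 *		// ... critical section ...
 *		uatomic_store(&lock, 0, CMM_RELEASE);
 *	}
 */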
#define uatomic_sub_return_mo(addr, v, mo) \
	uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)
-
#ifndef uatomic_read
#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
#endif
#define uatomic_load(addr, mo) \
__extension__ \
({ \
- switch (mo) { \
- case CMM_ACQUIRE: \
- case CMM_CONSUME: \
- case CMM_RELAXED: \
- break; \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- default: \
- abort(); \
- } \
- \
+ _cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo); \
__typeof__(*(addr)) _rcu_value = uatomic_read(addr); \
- \
- switch (mo) { \
- case CMM_RELAXED: \
- break; \
- case CMM_CONSUME: \
- cmm_smp_read_barrier_depends(); \
- break; \
- case CMM_ACQUIRE: \
- case CMM_SEQ_CST: \
- case CMM_SEQ_CST_FENCE: \
- cmm_smp_mb(); \
- break; \
- default: \
- abort(); \
- } \
+ _cmm_compat_c11_smp_mb__after_mo(uatomic_read, mo); \
\
_rcu_value; \
})
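/*
 * Typical release/acquire pairing with the two helpers above (hypothetical
 * `data' and `ready' variables, both initially 0):
 *
 *	// Producer
 *	data = 42;
 *	uatomic_store(&ready, 1, CMM_RELEASE);
 *
 *	// Consumer
 *	if (uatomic_load(&ready, CMM_ACQUIRE))
 *		r = data;	// guaranteed to observe 42
 *
 * The release store orders the write to `data' before `ready' becomes
 * visible; the acquire load orders the read of `ready' before the read of
 * `data'.
 */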
#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H
+#include <stdlib.h> /* For abort(3). */
+
/*
* Code inspired from libuatomic_ops-1.2, inherited in part from the
* Boehm-Demers-Weiser conservative garbage collector.
#define cmm_smp_mb__before_uatomic_dec() cmm_barrier()
#define cmm_smp_mb__after_uatomic_dec() cmm_barrier()
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
+{
+ /*
+	 * An SMP barrier is not necessary for CMM_SEQ_CST because only a
+	 * previous store can be reordered with the load. However, emitting the
+	 * memory barrier after the store is sufficient to prevent reordering
+	 * between the two. This follows the toolchains' convention of emitting
+	 * the memory fence on stores rather than loads.
+ *
+ * A compiler barrier is necessary because the underlying operation does
+ * not clobber the registers.
+ */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ cmm_barrier();
+ break;
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ default:
+ abort();
+ break;
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
+{
+ /*
+	 * An SMP barrier is not necessary for CMM_SEQ_CST because following
+	 * loads and stores cannot be reordered with the load.
+	 *
+	 * An SMP barrier is however necessary for CMM_SEQ_CST_FENCE to respect
+ * the memory model, since the underlying operation does not have a lock
+ * prefix.
+ *
+ * A compiler barrier is necessary because the underlying operation does
+ * not clobber the registers.
+ */
+ switch (mo) {
+ case CMM_SEQ_CST_FENCE:
+ cmm_smp_mb();
+ break;
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_SEQ_CST:
+ cmm_barrier();
+ break;
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ default:
+ abort();
+ break;
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo)
+{
+ /*
+	 * An SMP barrier is not necessary for CMM_SEQ_CST because the store can
+	 * only be reordered with later loads.
+ *
+ * A compiler barrier is necessary because the underlying operation does
+ * not clobber the registers.
+ */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ cmm_barrier();
+ break;
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ default:
+ abort();
+ break;
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo)
+{
+ /*
+	 * An SMP barrier is necessary for CMM_SEQ_CST because the store can be
+	 * reordered with later loads. Since no memory barrier is emitted before
+	 * loads, one has to be emitted after the store. This follows the
+	 * toolchains' convention of emitting the memory fence on stores rather
+	 * than loads.
+ *
+	 * An SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the
+ * memory model, since the underlying store does not have a lock prefix.
+ *
+ * A compiler barrier is necessary because the underlying operation does
+ * not clobber the registers.
+ */
+ switch (mo) {
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ cmm_smp_mb();
+ break;
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_RELEASE:
+ cmm_barrier();
+ break;
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ default:
+ abort();
+ break;
+ }
+}
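/*
 * Store-buffering sketch of why the fence must follow a SEQ_CST store
 * (hypothetical shared variables `x' and `y', both initially 0):
 *
 *	// CPU0
 *	uatomic_store(&x, 1, CMM_SEQ_CST);
 *	r0 = uatomic_load(&y, CMM_SEQ_CST);
 *
 *	// CPU1
 *	uatomic_store(&y, 1, CMM_SEQ_CST);
 *	r1 = uatomic_load(&x, CMM_SEQ_CST);
 *
 * Without the cmm_smp_mb() emitted after each SEQ_CST store, x86 could delay
 * both stores in the store buffers past the following loads and allow the
 * forbidden outcome r0 == 0 && r1 == 0.
 */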
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_xchg has implicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_xchg has implicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_cmpxchg has implicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_cmpxchg has implicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_and has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_and has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_or has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_or has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_add has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_add has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_sub has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_sub has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_inc has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_inc has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_dec has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_dec has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_add_return has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_add_return has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_sub_return has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+ /* NOP. uatomic_sub_return has explicit lock prefix. */
+ switch (mo) {
+ case CMM_RELAXED: /* Fall-through */
+ case CMM_ACQUIRE: /* Fall-through */
+ case CMM_CONSUME: /* Fall-through */
+ case CMM_RELEASE: /* Fall-through */
+ case CMM_ACQ_REL: /* Fall-through */
+ case CMM_SEQ_CST: /* Fall-through */
+ case CMM_SEQ_CST_FENCE:
+ break;
+ default:
+ abort();
+ }
+}
+
+#define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \
+ do { \
+ _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \
+ } while (0)
+
+#define _cmm_compat_c11_smp_mb__after_mo(operation, mo) \
+ do { \
+ _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \
+ } while (0)
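/*
 * The token pasting above dispatches on the operation name; for instance
 *
 *	_cmm_compat_c11_smp_mb__before_mo(uatomic_set, CMM_SEQ_CST)
 *
 * resolves to
 *
 *	_cmm_compat_c11_smp_mb__before_uatomic_set_mo(CMM_SEQ_CST)
 *
 * which emits only a compiler barrier, while the matching "after" hook emits
 * the cmm_smp_mb() that CMM_SEQ_CST requires on x86.
 */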
+
#ifdef __cplusplus
}
#endif
struct __cds_wfcq_head *_h;
struct cds_wfcq_head *h;
} __attribute__((__transparent_union__)) cds_wfcq_head_ptr_t;
+
+typedef union {
+ const struct __cds_wfcq_head *_h;
+ const struct cds_wfcq_head *h;
+} __attribute__((__transparent_union__)) cds_wfcq_head_const_ptr_t;
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
{
return head;
}
+
+/*
+ * This static inline is only present for compatibility with C++. It has
+ * no effect in C.
+ */
+static inline const struct __cds_wfcq_head *__cds_wfcq_head_const_cast(const struct __cds_wfcq_head *head)
+{
+ return head;
+}
+
+/*
+ * This static inline is only present for compatibility with C++. It has
+ * no effect in C.
+ */
+static inline const struct cds_wfcq_head *cds_wfcq_head_const_cast(const struct cds_wfcq_head *head)
+{
+ return head;
+}
+
#else /* #ifndef __cplusplus */
/*
cds_wfcq_head_ptr_t ret = { .h = head };
return ret;
}
+
+/*
+ * This static inline is used internally by the static inline
+ * implementation of the API.
+ */
+static inline cds_wfcq_head_const_ptr_t __cds_wfcq_head_const_cast(const struct __cds_wfcq_head *head)
+{
+ cds_wfcq_head_const_ptr_t ret = { ._h = head };
+ return ret;
+}
+
+/*
+ * This static inline is used internally by the static inline
+ * implementation of the API.
+ */
+static inline cds_wfcq_head_const_ptr_t cds_wfcq_head_const_cast(const struct cds_wfcq_head *head)
+{
+ cds_wfcq_head_const_ptr_t ret = { .h = head };
+ return ret;
+}
+
#endif /* #else #ifndef __cplusplus */
struct cds_wfcq_tail {
*
* No memory barrier is issued. No mutual exclusion is required.
*/
-extern bool cds_wfcq_empty(cds_wfcq_head_ptr_t head,
- struct cds_wfcq_tail *tail);
+extern bool cds_wfcq_empty(cds_wfcq_head_const_ptr_t head,
+ const struct cds_wfcq_tail *tail);
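/*
 * Both the queue head and tail may now be const-qualified at the call site,
 * e.g. a hypothetical read-only helper:
 *
 *	static bool queue_is_empty(const struct cds_wfcq_head *head,
 *			const struct cds_wfcq_tail *tail)
 *	{
 *		return cds_wfcq_empty(head, tail);
 *	}
 *
 * In C the const transparent union accepts the head pointer directly; in C++
 * the cds_wfcq_head_const_cast_cpp() overloads perform the conversion.
 */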
/*
* cds_wfcq_dequeue_lock: take the dequeue mutual exclusion lock.
return ret;
}
+static inline cds_wfcq_head_const_ptr_t cds_wfcq_head_const_cast_cpp(const struct __cds_wfcq_head *head)
+{
+ cds_wfcq_head_const_ptr_t ret = { ._h = head };
+ return ret;
+}
+
+static inline cds_wfcq_head_const_ptr_t cds_wfcq_head_const_cast_cpp(const struct cds_wfcq_head *head)
+{
+ cds_wfcq_head_const_ptr_t ret = { .h = head };
+ return ret;
+}
+
template<typename T> static inline bool cds_wfcq_empty(T head,
- struct cds_wfcq_tail *tail)
+ const struct cds_wfcq_tail *tail)
{
- return cds_wfcq_empty(cds_wfcq_head_cast_cpp(head), tail);
+ return cds_wfcq_empty(cds_wfcq_head_const_cast_cpp(head), tail);
}
template<typename T> static inline bool cds_wfcq_enqueue(T head,
struct __cds_wfs_stack *_s;
struct cds_wfs_stack *s;
} __attribute__((__transparent_union__)) cds_wfs_stack_ptr_t;
+
+typedef union {
+ const struct __cds_wfs_stack *_s;
+ const struct cds_wfs_stack *s;
+} __attribute__((__transparent_union__)) cds_wfs_stack_const_ptr_t;
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
*
* No memory barrier is issued. No mutual exclusion is required.
*/
-extern bool cds_wfs_empty(cds_wfs_stack_ptr_t u_stack);
+extern bool cds_wfs_empty(cds_wfs_stack_const_ptr_t u_stack);
/*
* cds_wfs_push: push a node into the stack.
return ret;
}
+static inline cds_wfs_stack_const_ptr_t cds_wfs_stack_const_cast(const struct __cds_wfs_stack *s)
+{
+ cds_wfs_stack_const_ptr_t ret = {
+ ._s = s,
+ };
+ return ret;
+}
+
+static inline cds_wfs_stack_const_ptr_t cds_wfs_stack_const_cast(const struct cds_wfs_stack *s)
+{
+ cds_wfs_stack_const_ptr_t ret = {
+ .s = s,
+ };
+ return ret;
+}
+
template<typename T> static inline bool cds_wfs_empty(T s)
{
- return cds_wfs_empty(cds_wfs_stack_cast(s));
+ return cds_wfs_empty(cds_wfs_stack_const_cast(s));
}
template<typename T> static inline int cds_wfs_push(T s, struct cds_wfs_node *node)
/*
- * SPDX-License-Identifier: LGPL-2.1-only
+ * SPDX-License-Identifier: MIT
*
* Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
total_bytes_read += bytes_read;
assert(total_bytes_read <= max_bytes);
- } while (max_bytes > total_bytes_read && bytes_read > 0);
+ } while (max_bytes > total_bytes_read && bytes_read != 0);
/*
* Make sure the mask read is a null terminated string.
___cds_lfs_init(s);
}
-bool cds_lfs_empty(cds_lfs_stack_ptr_t s)
+bool cds_lfs_empty(cds_lfs_stack_const_ptr_t s)
{
return _cds_lfs_empty(s);
}
___cds_wfcq_init(head, tail);
}
-bool cds_wfcq_empty(cds_wfcq_head_ptr_t head,
- struct cds_wfcq_tail *tail)
+bool cds_wfcq_empty(cds_wfcq_head_const_ptr_t head,
+ const struct cds_wfcq_tail *tail)
{
return _cds_wfcq_empty(head, tail);
___cds_wfs_init(s);
}
-bool cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
+bool cds_wfs_empty(cds_wfs_stack_const_ptr_t u_stack)
{
return _cds_wfs_empty(u_stack);
}