From: Mathieu Desnoyers
Date: Thu, 13 Jun 2024 18:47:33 +0000 (-0400)
Subject: rculfhash: make cds_lfht_iter_get_node argument const
X-Git-Url: https://git.liburcu.org/?a=commitdiff_plain;ds=sidebyside;h=refs%2Fheads%2Fmaster;hp=6b071d73cffc66df0bdb9ee3c062143f06923c78;p=urcu.git

rculfhash: make cds_lfht_iter_get_node argument const

cds_lfht_iter_get_node doesn't modify its argument. Hence, it can be
marked as `const`.

Signed-off-by: Mathieu Desnoyers
Change-Id: I69f6460928e90faa0a44e6c795a0eccb3f418738
---

diff --git a/extras/abi/dump_abi.sh b/extras/abi/dump_abi.sh
index a7bd5fd..673b839 100755
--- a/extras/abi/dump_abi.sh
+++ b/extras/abi/dump_abi.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # SPDX-FileCopyrightText: 2021 Michael Jeanson
 #
diff --git a/include/urcu/futex.h b/include/urcu/futex.h
index 9d0a997..f1181ee 100644
--- a/include/urcu/futex.h
+++ b/include/urcu/futex.h
@@ -19,17 +19,23 @@
 
 #if (defined(__linux__) && defined(__NR_futex))
 
 /* For backwards compat */
-#define CONFIG_RCU_HAVE_FUTEX 1
+# define CONFIG_RCU_HAVE_FUTEX 1
 
-#include <unistd.h>
-#include <errno.h>
-#include <urcu/compiler.h>
-#include <urcu/arch.h>
+# include <unistd.h>
+# include <errno.h>
+# include <urcu/compiler.h>
+# include <urcu/arch.h>
+# include <urcu/assert.h>
 
 #elif defined(__FreeBSD__)
 
-#include <sys/types.h>
-#include <sys/umtx.h>
+# include <sys/types.h>
+# include <sys/umtx.h>
+
+#elif defined(__OpenBSD__)
+
+# include <sys/time.h>
+# include <sys/futex.h>
 
 #endif
@@ -37,8 +43,10 @@
 extern "C" {
 #endif
 
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
+#ifndef __OpenBSD__
+# define FUTEX_WAIT 0
+# define FUTEX_WAKE 1
+#endif
 
 /*
  * sys_futex compatibility header.
@@ -64,8 +72,7 @@ extern int compat_futex_async(int32_t *uaddr, int op, int32_t val,
 static inline int futex(int32_t *uaddr, int op, int32_t val,
 		const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
 {
-	return syscall(__NR_futex, uaddr, op, val, timeout,
-			uaddr2, val3);
+	return syscall(__NR_futex, uaddr, op, val, timeout, uaddr2, val3);
 }
 
 static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
@@ -107,9 +114,7 @@ static inline int futex_async(int32_t *uaddr, int op, int32_t val,
 #elif defined(__FreeBSD__)
 
 static inline int futex_async(int32_t *uaddr, int op, int32_t val,
-	const struct timespec *timeout,
-	int32_t *uaddr2 __attribute__((unused)),
-	int32_t val3 __attribute__((unused)))
+	const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
 {
 	int umtx_op;
 	void *umtx_uaddr = NULL, *umtx_uaddr2 = NULL;
@@ -118,6 +123,13 @@ static inline int futex_async(int32_t *uaddr, int op, int32_t val,
 		._clockid = CLOCK_MONOTONIC,
 	};
 
+	/*
+	 * Check if NULL or zero. Don't let users expect that they are
+	 * taken into account.
+	 */
+	urcu_posix_assert(!uaddr2);
+	urcu_posix_assert(!val3);
+
 	switch (op) {
 	case FUTEX_WAIT:
 		/* On FreeBSD, a "u_int" is a 32-bit integer. */
@@ -146,6 +158,48 @@ static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
 	return futex_async(uaddr, op, val, timeout, uaddr2, val3);
 }
 
+#elif defined(__OpenBSD__)
+
+static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
+	const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+	int ret;
+
+	/*
+	 * Check that val3 is zero. Don't let users expect that it is
+	 * taken into account.
+	 */
+	urcu_posix_assert(!val3);
+
+	ret = futex((volatile uint32_t *) uaddr, op, val, timeout,
+		(volatile uint32_t *) uaddr2);
+	if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+		return compat_futex_noasync(uaddr, op, val, timeout,
+				uaddr2, val3);
+	}
+	return ret;
+}
+
+static inline int futex_async(int32_t *uaddr, int op, int32_t val,
+	const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+	int ret;
+
+	/*
+	 * Check that val3 is zero. Don't let users expect that it is
+	 * taken into account.
+	 */
+	urcu_posix_assert(!val3);
+
+	ret = futex((volatile uint32_t *) uaddr, op, val, timeout,
+		(volatile uint32_t *) uaddr2);
+	if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+		return compat_futex_async(uaddr, op, val, timeout,
+				uaddr2, val3);
+	}
+	return ret;
+}
+
 #elif defined(__CYGWIN__)
 
 /*
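
For context on how these wrappers are meant to be driven: a minimal
wait/wake pair over a 32-bit futex word, in the style of urcu's
grace-period code (sketch only; gp_futex and both helpers are
illustrative, not part of this patch):

	#include <errno.h>
	#include <stdint.h>
	#include <urcu/futex.h>
	#include <urcu/uatomic.h>

	static int32_t gp_futex;

	/* Waiter: block while gp_futex holds -1; spurious wakeups are fine. */
	static void wait_gp(void)
	{
		while (uatomic_read(&gp_futex) == -1) {
			/* FUTEX_WAIT sleeps only if *uaddr still equals -1. */
			if (futex_noasync(&gp_futex, FUTEX_WAIT, -1,
					NULL, NULL, 0) < 0 && errno != EINTR)
				break;	/* e.g. EWOULDBLOCK: value already changed. */
		}
	}

	/* Waker: publish the state change, then wake one waiter. */
	static void wake_gp(void)
	{
		uatomic_store(&gp_futex, 0, CMM_RELEASE);
		futex_noasync(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}

On OpenBSD the same calls now reach the native futex(2); the ENOSYS
fallback keeps the compat path for kernels without it.
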
+ */ + urcu_posix_assert(!val3); + + ret = futex((volatile uint32_t *) uaddr, op, val, timeout, + (volatile uint32_t *) uaddr2); + if (caa_unlikely(ret < 0 && errno == ENOSYS)) { + return compat_futex_noasync(uaddr, op, val, timeout, + uaddr2, val3); + } + return ret; +} + +static inline int futex_async(int32_t *uaddr, int op, int32_t val, + const struct timespec *timeout, int32_t *uaddr2, int32_t val3) +{ + int ret; + + /* + * Check that val3 is zero. Don't let users expect that it is + * taken into account. + */ + urcu_posix_assert(!val3); + + ret = futex((volatile uint32_t *) uaddr, op, val, timeout, + (volatile uint32_t *) uaddr2); + if (caa_unlikely(ret < 0 && errno == ENOSYS)) { + return compat_futex_async(uaddr, op, val, timeout, + uaddr2, val3); + } + return ret; +} + #elif defined(__CYGWIN__) /* diff --git a/include/urcu/lfstack.h b/include/urcu/lfstack.h index 77af43b..2a3073d 100644 --- a/include/urcu/lfstack.h +++ b/include/urcu/lfstack.h @@ -84,6 +84,11 @@ typedef union { struct __cds_lfs_stack *_s; struct cds_lfs_stack *s; } __attribute__((__transparent_union__)) cds_lfs_stack_ptr_t; + +typedef union { + const struct __cds_lfs_stack *_s; + const struct cds_lfs_stack *s; +} __attribute__((__transparent_union__)) cds_lfs_stack_const_ptr_t; #if defined(__clang__) #pragma clang diagnostic pop #endif @@ -141,7 +146,7 @@ extern void __cds_lfs_init(struct __cds_lfs_stack *s); * * No memory barrier is issued. No mutual exclusion is required. */ -extern bool cds_lfs_empty(cds_lfs_stack_ptr_t s); +extern bool cds_lfs_empty(cds_lfs_stack_const_ptr_t s); /* * cds_lfs_push: push a node into the stack. @@ -276,9 +281,25 @@ static inline cds_lfs_stack_ptr_t cds_lfs_stack_cast(struct cds_lfs_stack *s) return ret; } +static inline cds_lfs_stack_const_ptr_t cds_lfs_stack_const_cast(const struct __cds_lfs_stack *s) +{ + cds_lfs_stack_const_ptr_t ret = { + ._s = s, + }; + return ret; +} + +static inline cds_lfs_stack_const_ptr_t cds_lfs_stack_const_cast(const struct cds_lfs_stack *s) +{ + cds_lfs_stack_const_ptr_t ret = { + .s = s, + }; + return ret; +} + template static inline bool cds_lfs_empty(T s) { - return cds_lfs_empty(cds_lfs_stack_cast(s)); + return cds_lfs_empty(cds_lfs_stack_const_cast(s)); } template static inline bool cds_lfs_push(T s, diff --git a/include/urcu/list.h b/include/urcu/list.h index 4bc88b3..f2d77bb 100644 --- a/include/urcu/list.h +++ b/include/urcu/list.h @@ -88,7 +88,7 @@ void cds_list_move(struct cds_list_head *elem, struct cds_list_head *head) /* Replace an old entry. 
diff --git a/include/urcu/list.h b/include/urcu/list.h
index 4bc88b3..f2d77bb 100644
--- a/include/urcu/list.h
+++ b/include/urcu/list.h
@@ -88,7 +88,7 @@ void cds_list_move(struct cds_list_head *elem, struct cds_list_head *head)
 
 /* Replace an old entry. */
 static inline
-void cds_list_replace(struct cds_list_head *old, struct cds_list_head *_new)
+void cds_list_replace(const struct cds_list_head *old, struct cds_list_head *_new)
 {
 	_new->next = old->next;
 	_new->prev = old->prev;
@@ -169,7 +169,7 @@ void cds_list_splice(struct cds_list_head *add, struct cds_list_head *head)
 		pos = (p), p = cds_list_entry((pos)->member.next, __typeof__(*(pos)), member))
 
 static inline
-int cds_list_empty(struct cds_list_head *head)
+int cds_list_empty(const struct cds_list_head *head)
 {
 	return head == head->next;
 }
diff --git a/include/urcu/rculfhash.h b/include/urcu/rculfhash.h
index e0f4b35..69a251c 100644
--- a/include/urcu/rculfhash.h
+++ b/include/urcu/rculfhash.h
@@ -81,7 +81,7 @@ struct cds_lfht_alloc {
 };
 
 static inline
-struct cds_lfht_node *cds_lfht_iter_get_node(struct cds_lfht_iter *iter)
+struct cds_lfht_node *cds_lfht_iter_get_node(const struct cds_lfht_iter *iter)
 {
 	return iter->node;
 }
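
With the const-qualified accessor, read-side code that only inspects
the iterator can say so in its types. A minimal lookup sketch
(key_is_present is hypothetical; the caller must hold rcu_read_lock()):

	#include <urcu/rculfhash.h>

	static int key_is_present(struct cds_lfht *ht, unsigned long hash,
			cds_lfht_match_fct match, const void *key)
	{
		struct cds_lfht_iter iter;

		cds_lfht_lookup(ht, hash, match, key, &iter);
		/* The iterator is only read here, hence the const argument. */
		return cds_lfht_iter_get_node(&iter) != NULL;
	}
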
diff --git a/include/urcu/static/lfstack.h b/include/urcu/static/lfstack.h
index d7e70d4..22233d8 100644
--- a/include/urcu/static/lfstack.h
+++ b/include/urcu/static/lfstack.h
@@ -87,7 +87,7 @@ void ___cds_lfs_init(struct __cds_lfs_stack *s)
 }
 
 static inline
-bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
+bool ___cds_lfs_empty_head(const struct cds_lfs_head *head)
 {
 	return head == NULL;
 }
@@ -98,7 +98,7 @@ bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
 * No memory barrier is issued. No mutual exclusion is required.
 */
 static inline
-bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
+bool _cds_lfs_empty(cds_lfs_stack_const_ptr_t s)
 {
 	return ___cds_lfs_empty_head(uatomic_load(&s._s->head, CMM_RELAXED));
 }
diff --git a/include/urcu/static/wfcqueue.h b/include/urcu/static/wfcqueue.h
index 26741ae..2799c61 100644
--- a/include/urcu/static/wfcqueue.h
+++ b/include/urcu/static/wfcqueue.h
@@ -133,10 +133,10 @@ static inline void ___cds_wfcq_init(struct __cds_wfcq_head *head,
 * make a queue appear empty if an enqueuer is preempted for a long time
 * between xchg() and setting the previous node's next pointer.
 */
-static inline bool _cds_wfcq_empty(cds_wfcq_head_ptr_t u_head,
-		struct cds_wfcq_tail *tail)
+static inline bool _cds_wfcq_empty(cds_wfcq_head_const_ptr_t u_head,
+		const struct cds_wfcq_tail *tail)
 {
-	struct __cds_wfcq_head *head = u_head._h;
+	const struct __cds_wfcq_head *head = u_head._h;
 	/*
 	 * Queue is empty if no node is pointed by head->node.next nor
 	 * tail->p. Even though the tail->p check is sufficient to find
@@ -283,7 +283,7 @@ ___cds_wfcq_first(cds_wfcq_head_ptr_t u_head,
 	struct __cds_wfcq_head *head = u_head._h;
 	struct cds_wfcq_node *node;
 
-	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail))
+	if (_cds_wfcq_empty(__cds_wfcq_head_const_cast(head), tail))
 		return NULL;
 	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
@@ -399,7 +399,7 @@ ___cds_wfcq_dequeue_with_state(cds_wfcq_head_ptr_t u_head,
 	if (state)
 		*state = 0;
 
-	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail)) {
+	if (_cds_wfcq_empty(__cds_wfcq_head_const_cast(head), tail)) {
 		return NULL;
 	}
@@ -530,7 +530,7 @@ ___cds_wfcq_splice(
 	 * Initial emptiness check to speed up cases where queue is
 	 * empty: only require loads to check if queue is empty.
 	 */
-	if (_cds_wfcq_empty(__cds_wfcq_head_cast(src_q_head), src_q_tail))
+	if (_cds_wfcq_empty(__cds_wfcq_head_const_cast(src_q_head), src_q_tail))
 		return CDS_WFCQ_RET_SRC_EMPTY;
 
 	for (;;) {
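
As the emptiness comment above stresses, the check is loads-only and
may race with concurrent enqueuers; it is a fast-path hint, not a
synchronization point. A hypothetical caller sketch (assumes the nodes
were heap-allocated by the enqueuer):

	#include <stdlib.h>
	#include <urcu/wfcqueue.h>

	static void drain_if_work(struct cds_wfcq_head *head,
			struct cds_wfcq_tail *tail)
	{
		struct cds_wfcq_node *node;

		if (cds_wfcq_empty(head, tail))
			return;	/* Cheap check: loads only, no lock taken. */
		while ((node = cds_wfcq_dequeue_blocking(head, tail)) != NULL)
			free(node);
	}
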
+ */ +#ifndef _cmm_compat_c11_smp_mb__after_mo +# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb() +#endif + +#define uatomic_load_store_return_op(op, addr, v, mo) \ + __extension__ \ + ({ \ + _cmm_compat_c11_smp_mb__before_mo(op, mo); \ + __typeof__((*addr)) _value = op(addr, v); \ + _cmm_compat_c11_smp_mb__after_mo(op, mo); \ + \ + _value; \ }) -#define uatomic_load_store_op(op, addr, v, mo) \ - do { \ - switch (mo) { \ - case CMM_ACQUIRE: \ - case CMM_CONSUME: \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ - op(addr, v); \ - \ - switch (mo) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - default: \ - abort(); \ - } \ +#define uatomic_load_store_op(op, addr, v, mo) \ + do { \ + _cmm_compat_c11_smp_mb__before_mo(op, mo); \ + op(addr, v); \ + _cmm_compat_c11_smp_mb__after_mo(op, mo); \ } while (0) -#define uatomic_store(addr, v, mo) \ - do { \ - switch (mo) { \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ - uatomic_set(addr, v); \ - \ - switch (mo) { \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ +#define uatomic_store(addr, v, mo) \ + do { \ + _cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo); \ + uatomic_set(addr, v); \ + _cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo); \ } while (0) -#define uatomic_and_mo(addr, v, mo) \ +#define uatomic_and_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_and, addr, v, mo) -#define uatomic_or_mo(addr, v, mo) \ +#define uatomic_or_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_or, addr, v, mo) -#define uatomic_add_mo(addr, v, mo) \ +#define uatomic_add_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_add, addr, v, mo) -#define uatomic_sub_mo(addr, v, mo) \ +#define uatomic_sub_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_sub, addr, v, mo) -#define uatomic_inc_mo(addr, mo) \ +#define uatomic_inc_mo(addr, mo) \ uatomic_load_store_op(uatomic_add, addr, 1, mo) #define uatomic_dec_mo(addr, mo) \ @@ -157,58 +92,14 @@ extern "C" { #define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \ __extension__ \ ({ \ - switch (mos) { \ - case CMM_ACQUIRE: \ - case CMM_CONSUME: \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ + _cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \ __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \ new); \ \ if (_value == (old)) { \ - switch (mos) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - default: \ - abort(); \ - } \ + _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \ } else { \ - switch (mof) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - 
diff --git a/include/urcu/uatomic/x86.h b/include/urcu/uatomic/x86.h
index b5725e0..616eee9 100644
--- a/include/urcu/uatomic/x86.h
+++ b/include/urcu/uatomic/x86.h
@@ -8,6 +8,8 @@
 #ifndef _URCU_ARCH_UATOMIC_X86_H
 #define _URCU_ARCH_UATOMIC_X86_H
 
+#include <stdlib.h>		/* For abort(3). */
+
 /*
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
@@ -630,6 +632,474 @@ extern unsigned long _compat_uatomic_add_return(void *addr,
 #define cmm_smp_mb__before_uatomic_dec()	cmm_barrier()
 #define cmm_smp_mb__after_uatomic_dec()		cmm_barrier()
 
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
+{
+	/*
+	 * An SMP barrier is not necessary for CMM_SEQ_CST because only a
+	 * previous store can be reordered with the load. However, emitting the
+	 * memory barrier after the store is sufficient to prevent reordering
+	 * between the two. This follows the toolchains' decision of emitting
+	 * the memory fence on the stores instead of the loads.
+	 *
+	 * A compiler barrier is necessary because the underlying operation does
+	 * not clobber the registers.
+	 */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		cmm_barrier();
+		break;
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	default:
+		abort();
+		break;
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
+{
+	/*
+	 * An SMP barrier is not necessary for CMM_SEQ_CST because following
+	 * loads and stores cannot be reordered with the load.
+	 *
+	 * An SMP barrier is however necessary for CMM_SEQ_CST_FENCE to respect
+	 * the memory model, since the underlying operation does not have a lock
+	 * prefix.
+	 *
+	 * A compiler barrier is necessary because the underlying operation does
+	 * not clobber the registers.
+	 */
+	switch (mo) {
+	case CMM_SEQ_CST_FENCE:
+		cmm_smp_mb();
+		break;
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_SEQ_CST:
+		cmm_barrier();
+		break;
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	default:
+		abort();
+		break;
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo)
+{
+	/*
+	 * An SMP barrier is not necessary for CMM_SEQ_CST because the store
+	 * can only be reordered with later loads.
+	 *
+	 * A compiler barrier is necessary because the underlying operation does
+	 * not clobber the registers.
+	 */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		cmm_barrier();
+		break;
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	default:
+		abort();
+		break;
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo)
+{
+	/*
+	 * An SMP barrier is necessary for CMM_SEQ_CST because the store can be
+	 * reordered with later loads. Since no memory barrier is being emitted
+	 * before loads, one has to be emitted after the store. This follows the
+	 * toolchains' decision of emitting the memory fence on the stores
+	 * instead of the loads.
+	 *
+	 * An SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the
+	 * memory model, since the underlying store does not have a lock prefix.
+	 *
+	 * A compiler barrier is necessary because the underlying operation does
+	 * not clobber the registers.
+	 */
+	switch (mo) {
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		cmm_smp_mb();
+		break;
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_RELEASE:
+		cmm_barrier();
+		break;
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	default:
+		abort();
+		break;
+	}
+}
+
+ */ + switch (mo) { + case CMM_SEQ_CST_FENCE: + cmm_smp_mb(); + break; + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_SEQ_CST: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo) +{ + /* + * A SMP barrier is not necessary for CMM_SEQ_CST because the store can + * only be reodered with later loads + * + * A compiler barrier is necessary because the underlying operation does + * not clobber the registers. + */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo) +{ + /* + * A SMP barrier is necessary for CMM_SEQ_CST because the store can be + * reorded with later loads. Since no memory barrier is being emitted + * before loads, one has to be emitted after the store. This follows + * toolchains decision of emitting the memory fence on the stores instead + * of the loads. + * + * A SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the + * memory model, since the underlying store does not have a lock prefix. + * + * A compiler barrier is necessary because the underlying operation does + * not clobber the registers. + */ + switch (mo) { + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + cmm_smp_mb(); + break; + case CMM_RELAXED: /* Fall-through */ + case CMM_RELEASE: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_xchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_xchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_cmpxchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo) +{ + /* NOP. 
+	/* NOP. uatomic_cmpxchg has implicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_and has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_and has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_or has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_or has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_add has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_add has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_sub has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_sub has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_inc has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_inc has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_dec has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_dec has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_add_return has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_add_return has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_sub_return has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+	/* NOP. uatomic_sub_return has explicit lock prefix. */
+	switch (mo) {
+	case CMM_RELAXED:	/* Fall-through */
+	case CMM_ACQUIRE:	/* Fall-through */
+	case CMM_CONSUME:	/* Fall-through */
+	case CMM_RELEASE:	/* Fall-through */
+	case CMM_ACQ_REL:	/* Fall-through */
+	case CMM_SEQ_CST:	/* Fall-through */
+	case CMM_SEQ_CST_FENCE:
+		break;
+	default:
+		abort();
+	}
+}
+
+#define _cmm_compat_c11_smp_mb__before_mo(operation, mo)	\
+	do {							\
+		_cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \
+	} while (0)
+
+#define _cmm_compat_c11_smp_mb__after_mo(operation, mo)		\
+	do {							\
+		_cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \
+	} while (0)
+
+
 #ifdef __cplusplus
 }
 #endif
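
The final pair of macros is what generic.h's hooks expand into on x86:
token pasting selects the per-operation helper above, so each call
site gets exactly the fence (or compiler barrier) its instruction
needs. A sketch of the net effect (counter is hypothetical):

	#include <urcu/uatomic.h>

	static long counter;

	static void count_event(void)
	{
		/*
		 * Expands via _cmm_compat_c11_smp_mb__before_uatomic_add_mo()
		 * and its __after_ twin, which emit no barrier at all on x86
		 * because the add already carries a lock prefix.
		 */
		uatomic_add_mo(&counter, 1, CMM_SEQ_CST);
	}
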
diff --git a/include/urcu/wfcqueue.h b/include/urcu/wfcqueue.h
index bba5c55..5a4add4 100644
--- a/include/urcu/wfcqueue.h
+++ b/include/urcu/wfcqueue.h
@@ -78,6 +78,11 @@ typedef union {
 	struct __cds_wfcq_head *_h;
 	struct cds_wfcq_head *h;
 } __attribute__((__transparent_union__)) cds_wfcq_head_ptr_t;
+
+typedef union {
+	const struct __cds_wfcq_head *_h;
+	const struct cds_wfcq_head *h;
+} __attribute__((__transparent_union__)) cds_wfcq_head_const_ptr_t;
 
 #if defined(__clang__)
 #pragma clang diagnostic pop
 #endif
@@ -100,6 +105,25 @@ static inline struct cds_wfcq_head *cds_wfcq_head_cast(struct cds_wfcq_head *hea
 {
 	return head;
 }
+
+/*
+ * This static inline is only present for compatibility with C++. It has
+ * no effect in C.
+ */
+static inline const struct __cds_wfcq_head *__cds_wfcq_head_const_cast(const struct __cds_wfcq_head *head)
+{
+	return head;
+}
+
+/*
+ * This static inline is only present for compatibility with C++. It has
+ * no effect in C.
+ */
+static inline const struct cds_wfcq_head *cds_wfcq_head_const_cast(const struct cds_wfcq_head *head)
+{
+	return head;
+}
+
 #else /* #ifndef __cplusplus */
 
 /*
@@ -121,6 +145,27 @@ static inline cds_wfcq_head_ptr_t cds_wfcq_head_cast(struct cds_wfcq_head *head)
 	cds_wfcq_head_ptr_t ret = { .h = head };
 	return ret;
 }
+
+/*
+ * This static inline is used internally by the static inline
+ * implementation of the API.
+ */
+static inline cds_wfcq_head_const_ptr_t __cds_wfcq_head_const_cast(const struct __cds_wfcq_head *head)
+{
+	cds_wfcq_head_const_ptr_t ret = { ._h = head };
+	return ret;
+}
+
+/*
+ * This static inline is used internally by the static inline
+ * implementation of the API.
+ */
+static inline cds_wfcq_head_const_ptr_t cds_wfcq_head_const_cast(const struct cds_wfcq_head *head)
+{
+	cds_wfcq_head_const_ptr_t ret = { .h = head };
+	return ret;
+}
+
 #endif /* #else #ifndef __cplusplus */
 
 struct cds_wfcq_tail {
@@ -238,8 +283,8 @@ extern void __cds_wfcq_init(struct __cds_wfcq_head *head,
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
-extern bool cds_wfcq_empty(cds_wfcq_head_ptr_t head,
-		struct cds_wfcq_tail *tail);
+extern bool cds_wfcq_empty(cds_wfcq_head_const_ptr_t head,
+		const struct cds_wfcq_tail *tail);
 
 /*
  * cds_wfcq_dequeue_lock: take the dequeue mutual exclusion lock.
@@ -500,10 +545,22 @@ static inline cds_wfcq_head_ptr_t cds_wfcq_head_cast_cpp(struct cds_wfcq_head *h
 	return ret;
 }
 
+static inline cds_wfcq_head_const_ptr_t cds_wfcq_head_const_cast_cpp(const struct __cds_wfcq_head *head)
+{
+	cds_wfcq_head_const_ptr_t ret = { ._h = head };
+	return ret;
+}
+
+static inline cds_wfcq_head_const_ptr_t cds_wfcq_head_const_cast_cpp(const struct cds_wfcq_head *head)
+{
+	cds_wfcq_head_const_ptr_t ret = { .h = head };
+	return ret;
+}
+
 template <typename T> static inline bool cds_wfcq_empty(T head,
-		struct cds_wfcq_tail *tail)
+		const struct cds_wfcq_tail *tail)
 {
-	return cds_wfcq_empty(cds_wfcq_head_cast_cpp(head), tail);
+	return cds_wfcq_empty(cds_wfcq_head_const_cast_cpp(head), tail);
 }
 
 template <typename T> static inline bool cds_wfcq_enqueue(T head,
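
In C, the transparent union lets pointers-to-const flow straight into
cds_wfcq_empty(); in C++, the _cpp overloads above give the template
wrapper the same reach. A hypothetical C caller:

	#include <urcu/wfcqueue.h>

	static int queue_quiescent(const struct cds_wfcq_head *head,
			const struct cds_wfcq_tail *tail)
	{
		return cds_wfcq_empty(head, tail);
	}
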
diff --git a/include/urcu/wfstack.h b/include/urcu/wfstack.h
index 38e5b6b..66d4150 100644
--- a/include/urcu/wfstack.h
+++ b/include/urcu/wfstack.h
@@ -96,6 +96,11 @@ typedef union {
 	struct __cds_wfs_stack *_s;
 	struct cds_wfs_stack *s;
 } __attribute__((__transparent_union__)) cds_wfs_stack_ptr_t;
+
+typedef union {
+	const struct __cds_wfs_stack *_s;
+	const struct cds_wfs_stack *s;
+} __attribute__((__transparent_union__)) cds_wfs_stack_const_ptr_t;
 
 #if defined(__clang__)
 #pragma clang diagnostic pop
 #endif
@@ -167,7 +172,7 @@ extern void __cds_wfs_init(struct __cds_wfs_stack *s);
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
-extern bool cds_wfs_empty(cds_wfs_stack_ptr_t u_stack);
+extern bool cds_wfs_empty(cds_wfs_stack_const_ptr_t u_stack);
 
 /*
  * cds_wfs_push: push a node into the stack.
@@ -372,9 +377,25 @@ static inline cds_wfs_stack_ptr_t cds_wfs_stack_cast(struct cds_wfs_stack *s)
 	return ret;
 }
 
+static inline cds_wfs_stack_const_ptr_t cds_wfs_stack_const_cast(const struct __cds_wfs_stack *s)
+{
+	cds_wfs_stack_const_ptr_t ret = {
+		._s = s,
+	};
+	return ret;
+}
+
+static inline cds_wfs_stack_const_ptr_t cds_wfs_stack_const_cast(const struct cds_wfs_stack *s)
+{
+	cds_wfs_stack_const_ptr_t ret = {
+		.s = s,
+	};
+	return ret;
+}
+
 template <typename T> static inline bool cds_wfs_empty(T s)
 {
-	return cds_wfs_empty(cds_wfs_stack_cast(s));
+	return cds_wfs_empty(cds_wfs_stack_const_cast(s));
 }
 
 template <typename T> static inline int cds_wfs_push(T s, struct cds_wfs_node *node)
diff --git a/src/compat-smp.h b/src/compat-smp.h
index 31fa979..5da8d6a 100644
--- a/src/compat-smp.h
+++ b/src/compat-smp.h
@@ -1,5 +1,5 @@
 /*
- * SPDX-License-Identifier: LGPL-2.1-only
+ * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers
 * Copyright (C) 2019 Michael Jeanson
@@ -164,7 +164,7 @@ static inline int get_cpu_mask_from_sysfs(char *buf, size_t max_bytes, const cha
 		total_bytes_read += bytes_read;
 		assert(total_bytes_read <= max_bytes);
 
-	} while (max_bytes > total_bytes_read && bytes_read > 0);
+	} while (max_bytes > total_bytes_read && bytes_read != 0);
 
 	/*
 	 * Make sure the mask read is a null terminated string.
diff --git a/src/lfstack.c b/src/lfstack.c
index 5a2c80f..ca3de85 100644
--- a/src/lfstack.c
+++ b/src/lfstack.c
@@ -36,7 +36,7 @@ void __cds_lfs_init(struct __cds_lfs_stack *s)
 	___cds_lfs_init(s);
 }
 
-bool cds_lfs_empty(cds_lfs_stack_ptr_t s)
+bool cds_lfs_empty(cds_lfs_stack_const_ptr_t s)
 {
 	return _cds_lfs_empty(s);
 }
diff --git a/src/wfcqueue.c b/src/wfcqueue.c
index ff05510..294b266 100644
--- a/src/wfcqueue.c
+++ b/src/wfcqueue.c
@@ -38,8 +38,8 @@ void __cds_wfcq_init(struct __cds_wfcq_head *head,
 	___cds_wfcq_init(head, tail);
 }
 
-bool cds_wfcq_empty(cds_wfcq_head_ptr_t head,
-		struct cds_wfcq_tail *tail)
+bool cds_wfcq_empty(cds_wfcq_head_const_ptr_t head,
+		const struct cds_wfcq_tail *tail)
 {
 	return _cds_wfcq_empty(head, tail);
 }
diff --git a/src/wfstack.c b/src/wfstack.c
index 8fddaec..6308a94 100644
--- a/src/wfstack.c
+++ b/src/wfstack.c
@@ -34,7 +34,7 @@ void __cds_wfs_init(struct __cds_wfs_stack *s)
 	___cds_wfs_init(s);
}
 
-bool cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
+bool cds_wfs_empty(cds_wfs_stack_const_ptr_t u_stack)
 {
 	return _cds_wfs_empty(u_stack);
 }