X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Furcu.h;h=3fb457b4e3e00916b2a2e06e172b4eced08f0df0;hp=bee97eeb982f2979dae60a1d1ad95b96b9385393;hb=6ef5ddc9b4e136e250f3c5f292f847e6849e6200;hpb=4de0cd31491bcb93a19c14fc1eb2a2a23ce12855

diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index bee97ee..3fb457b 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -41,6 +41,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #ifdef __cplusplus
 extern "C" {
@@ -51,30 +53,6 @@ extern "C" {
 #define RCU_MEMBARRIER
 #endif
 
-/*
- * RCU_MEMBARRIER is only possibly available on Linux. Fallback to RCU_MB
- * otherwise.
- */
-#if !defined(__linux__) && defined(RCU_MEMBARRIER)
-#undef RCU_MEMBARRIER
-#define RCU_MB
-#endif
-
-#ifdef RCU_MEMBARRIER
-#include 
-
-/* If the headers do not support SYS_membarrier, statically use RCU_MB */
-#ifdef SYS_membarrier
-# define MEMBARRIER_EXPEDITED	(1 << 0)
-# define MEMBARRIER_DELAYED	(1 << 1)
-# define MEMBARRIER_QUERY	(1 << 16)
-# define membarrier(...)	syscall(SYS_membarrier, __VA_ARGS__)
-#else
-# undef RCU_MEMBARRIER
-# define RCU_MB
-#endif
-#endif
-
 /*
  * This code section can only be included in LGPL 2.1 compatible source code.
  * See below for the function call wrappers which can be used in code meant to
@@ -102,67 +80,6 @@ enum rcu_state {
 	RCU_READER_INACTIVE,
 };
 
-#ifdef DEBUG_RCU
-#define rcu_assert(args...)	assert(args)
-#else
-#define rcu_assert(args...)
-#endif
-
-#ifdef DEBUG_YIELD
-#include 
-#include 
-#include 
-#include 
-
-#define RCU_YIELD_READ	(1 << 0)
-#define RCU_YIELD_WRITE	(1 << 1)
-
-/*
- * Updates with RCU_SIGNAL are much slower. Account this in the delay.
- */
-#ifdef RCU_SIGNAL
-/* maximum sleep delay, in us */
-#define MAX_SLEEP 30000
-#else
-#define MAX_SLEEP 50
-#endif
-
-extern unsigned int rcu_yield_active;
-extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);
-
-static inline void rcu_debug_yield_read(void)
-{
-	if (rcu_yield_active & RCU_YIELD_READ)
-		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-}
-
-static inline void rcu_debug_yield_write(void)
-{
-	if (rcu_yield_active & RCU_YIELD_WRITE)
-		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-}
-
-static inline void rcu_debug_yield_init(void)
-{
-	URCU_TLS(rcu_rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
-}
-#else
-static inline void rcu_debug_yield_read(void)
-{
-}
-
-static inline void rcu_debug_yield_write(void)
-{
-}
-
-static inline void rcu_debug_yield_init(void)
-{
-
-}
-#endif
-
 /*
  * RCU memory barrier broadcast group. Currently, only broadcast to all process
  * threads is supported (group 0).
@@ -235,6 +152,8 @@ struct rcu_reader {
 	/* Data used for registry */
 	struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 	pthread_t tid;
+	/* Reader registered flag, for internal checks. */
+	unsigned int registered:1;
 };
 
 extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
@@ -246,8 +165,13 @@ static inline void wake_up_gp(void)
 {
 	if (caa_unlikely(uatomic_read(&rcu_gp.futex) == -1)) {
 		uatomic_set(&rcu_gp.futex, 0);
-		futex_async(&rcu_gp.futex, FUTEX_WAKE, 1,
-		      NULL, NULL, 0);
+		/*
+		 * Ignoring return value until we can make this function
+		 * return something (because urcu_die() is not publicly
+		 * exposed).
+		 */
+		(void) futex_async(&rcu_gp.futex, FUTEX_WAKE, 1,
+				NULL, NULL, 0);
 	}
 }
 
@@ -297,8 +221,10 @@ static inline void _rcu_read_lock(void)
 {
 	unsigned long tmp;
 
+	urcu_assert(URCU_TLS(rcu_reader).registered);
 	cmm_barrier();
 	tmp = URCU_TLS(rcu_reader).ctr;
+	urcu_assert((tmp & RCU_GP_CTR_NEST_MASK) != RCU_GP_CTR_NEST_MASK);
 	_rcu_read_lock_update(tmp);
 }
 
@@ -314,11 +240,11 @@ static inline void _rcu_read_unlock_update_and_wakeup(unsigned long tmp)
 {
 	if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
 		smp_mb_slave(RCU_MB_GROUP);
-		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp - RCU_GP_COUNT);
 		smp_mb_slave(RCU_MB_GROUP);
 		wake_up_gp();
 	} else
-		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp - RCU_GP_COUNT);
 }
 
 /*
@@ -330,7 +256,9 @@ static inline void _rcu_read_unlock(void)
 {
 	unsigned long tmp;
 
+	urcu_assert(URCU_TLS(rcu_reader).registered);
 	tmp = URCU_TLS(rcu_reader).ctr;
+	urcu_assert(tmp & RCU_GP_CTR_NEST_MASK);
 	_rcu_read_unlock_update_and_wakeup(tmp);
 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
 }
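For reference, a minimal reader-side sketch of the usage the new urcu_assert() checks expect: a thread must be registered before entering a read-side critical section, and lock/unlock must stay balanced so the nesting-count assertions hold. This is an illustrative example only, not part of the patch; it assumes the public rcu_register_thread()/rcu_unregister_thread() and rcu_read_lock()/rcu_read_unlock() wrappers from <urcu.h>, and that thread registration is what sets the new rcu_reader.registered bit (handled in urcu.c, outside this diff).

#include <urcu.h>

static void *reader_thread(void *arg)
{
	/*
	 * Register before any read-side critical section; otherwise the
	 * added urcu_assert(URCU_TLS(rcu_reader).registered) check would
	 * trip in debug builds.
	 */
	rcu_register_thread();

	rcu_read_lock();
	/* ... rcu_dereference() RCU-protected pointers here ... */
	rcu_read_unlock();

	rcu_unregister_thread();
	return NULL;
}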