X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Furcu.h;h=9082af71a92c443dd0efe8be74d879c55dd56ddd;hp=33545b71f68c1be9dc25ef78a59a9a052d16aca5;hb=67ecffc0f530a7b5c4dd5111ea7dd3213da8eb91;hpb=e1c0b55cbcc602bdff8b9778d911c35515f48cc3

diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index 33545b7..9082af7 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -42,10 +42,11 @@
 #include <urcu/list.h>
 #include <urcu/futex.h>
 #include <urcu/tls-compat.h>
+#include <urcu/debug.h>
 
 #ifdef __cplusplus
 extern "C" {
-#endif
+#endif
 
 /* Default is RCU_MEMBARRIER */
 #if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
@@ -79,71 +80,7 @@ enum rcu_state {
 	RCU_READER_INACTIVE,
 };
 
-#ifdef DEBUG_RCU
-#define rcu_assert(args...)	assert(args)
-#else
-#define rcu_assert(args...)
-#endif
-
-#ifdef DEBUG_YIELD
-#include <sched.h>
-#include <time.h>
-#include <pthread.h>
-#include <unistd.h>
-
-#define RCU_YIELD_READ	(1 << 0)
-#define RCU_YIELD_WRITE	(1 << 1)
-
-/*
- * Updates with RCU_SIGNAL are much slower. Account this in the delay.
- */
-#ifdef RCU_SIGNAL
-/* maximum sleep delay, in us */
-#define MAX_SLEEP 30000
-#else
-#define MAX_SLEEP 50
-#endif
-
-extern unsigned int rcu_yield_active;
-extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);
-
-static inline void rcu_debug_yield_read(void)
-{
-	if (rcu_yield_active & RCU_YIELD_READ)
-		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-}
-
-static inline void rcu_debug_yield_write(void)
-{
-	if (rcu_yield_active & RCU_YIELD_WRITE)
-		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-}
-
-static inline void rcu_debug_yield_init(void)
-{
-	URCU_TLS(rcu_rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
-}
-#else
-static inline void rcu_debug_yield_read(void)
-{
-}
-
-static inline void rcu_debug_yield_write(void)
-{
-}
-
-static inline void rcu_debug_yield_init(void)
-{
-
-}
-#endif
-
 /*
- * RCU memory barrier broadcast group. Currently, only broadcast to all process
- * threads is supported (group 0).
- *
  * Slave barriers are only guaranteed to be ordered wrt master barriers.
  *
  * The pair ordering is detailed as (O: ordered, X: not ordered) :
@@ -152,13 +89,10 @@ static inline void rcu_debug_yield_init(void)
  *        master      O           O
  */
 
-#define MB_GROUP_ALL		0
-#define RCU_MB_GROUP		MB_GROUP_ALL
-
 #ifdef RCU_MEMBARRIER
 extern int rcu_has_sys_membarrier;
 
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
 {
 	if (caa_likely(rcu_has_sys_membarrier))
 		cmm_barrier();
@@ -168,14 +102,14 @@ static inline void smp_mb_slave(int group)
 #endif
 
 #ifdef RCU_MB
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
 {
 	cmm_smp_mb();
 }
 #endif
 
 #ifdef RCU_SIGNAL
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
 {
 	cmm_barrier();
 }
@@ -212,6 +146,8 @@ struct rcu_reader {
 	/* Data used for registry */
 	struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 	pthread_t tid;
+	/* Reader registered flag, for internal checks. */
+	unsigned int registered:1;
 };
 
 extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
@@ -223,8 +159,13 @@ static inline void wake_up_gp(void)
 {
 	if (caa_unlikely(uatomic_read(&rcu_gp.futex) == -1)) {
 		uatomic_set(&rcu_gp.futex, 0);
-		futex_async(&rcu_gp.futex, FUTEX_WAKE, 1,
-				NULL, NULL, 0);
+		/*
+		 * Ignoring return value until we can make this function
+		 * return something (because urcu_die() is not publicly
+		 * exposed).
+		 */
+		(void) futex_async(&rcu_gp.futex, FUTEX_WAKE, 1,
+				NULL, NULL, 0);
 	}
 }
 
@@ -255,7 +196,7 @@ static inline void _rcu_read_lock_update(unsigned long tmp)
 {
 	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
 		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
-		smp_mb_slave(RCU_MB_GROUP);
+		smp_mb_slave();
 	} else
 		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
 }
@@ -274,8 +215,10 @@ static inline void _rcu_read_lock(void)
 {
 	unsigned long tmp;
 
+	urcu_assert(URCU_TLS(rcu_reader).registered);
 	cmm_barrier();
 	tmp = URCU_TLS(rcu_reader).ctr;
+	urcu_assert((tmp & RCU_GP_CTR_NEST_MASK) != RCU_GP_CTR_NEST_MASK);
 	_rcu_read_lock_update(tmp);
 }
 
@@ -290,12 +233,12 @@ static inline void _rcu_read_lock(void)
 static inline void _rcu_read_unlock_update_and_wakeup(unsigned long tmp)
 {
 	if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
-		smp_mb_slave(RCU_MB_GROUP);
-		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
-		smp_mb_slave(RCU_MB_GROUP);
+		smp_mb_slave();
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp - RCU_GP_COUNT);
+		smp_mb_slave();
 		wake_up_gp();
 	} else
-		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp - RCU_GP_COUNT);
 }
 
 /*
@@ -307,7 +250,9 @@ static inline void _rcu_read_unlock(void)
 {
 	unsigned long tmp;
 
+	urcu_assert(URCU_TLS(rcu_reader).registered);
 	tmp = URCU_TLS(rcu_reader).ctr;
+	urcu_assert(tmp & RCU_GP_CTR_NEST_MASK);
 	_rcu_read_unlock_update_and_wakeup(tmp);
 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
 }
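Usage note (not part of the diff above): a minimal sketch of how the internals touched by this patch surface through the public liburcu API, assuming the standard <urcu.h> wrappers (rcu_register_thread(), rcu_read_lock(), synchronize_rcu()) and linking with -lurcu. With this change, a build with urcu_assert enabled (DEBUG_RCU) catches a reader that calls rcu_read_lock() without having registered, as well as an unbalanced rcu_read_unlock(). The file name below is hypothetical.

/* reader-sketch.c: build with gcc reader-sketch.c -lurcu */
#include <pthread.h>
#include <stdio.h>
#include <urcu.h>

static int *shared_ptr;

static void *reader_thread(void *arg)
{
	rcu_register_thread();	/* sets the new rcu_reader.registered flag */

	rcu_read_lock();	/* urcu_assert(registered) passes */
	int *p = rcu_dereference(shared_ptr);
	if (p)
		printf("reader saw %d\n", *p);
	rcu_read_unlock();	/* urcu_assert(nesting count != 0) passes */

	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	static int value = 42;
	pthread_t tid;

	rcu_assign_pointer(shared_ptr, &value);
	if (pthread_create(&tid, NULL, reader_thread, NULL))
		return 1;
	pthread_join(tid, NULL);

	rcu_assign_pointer(shared_ptr, NULL);
	synchronize_rcu();	/* waits for all registered readers */
	return 0;
}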