X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Furcu-bp.h;h=b6d5f132778380c57af501307cf25e017c92c8ec;hp=2f365206afad6db4c561654109def617ef559ce5;hb=2650042a5405028da49bfcfb71053b78a56a9ed4;hpb=882f335739b978d1c55be2faeed077f315afe5d7

diff --git a/urcu/static/urcu-bp.h b/urcu/static/urcu-bp.h
index 2f36520..b6d5f13 100644
--- a/urcu/static/urcu-bp.h
+++ b/urcu/static/urcu-bp.h
@@ -64,58 +64,6 @@ enum rcu_state {
 	RCU_READER_INACTIVE,
 };
 
-#ifdef DEBUG_YIELD
-#include <sched.h>
-#include <time.h>
-#include <pthread.h>
-#include <unistd.h>
-
-#define RCU_YIELD_READ	(1 << 0)
-#define RCU_YIELD_WRITE	(1 << 1)
-
-/*
- * Updates without RCU_MB are much slower. Account this in
- * the delay.
- */
-/* maximum sleep delay, in us */
-#define MAX_SLEEP 50
-
-extern unsigned int rcu_yield_active;
-extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);
-
-static inline void rcu_debug_yield_read(void)
-{
-	if (rcu_yield_active & RCU_YIELD_READ)
-		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-}
-
-static inline void rcu_debug_yield_write(void)
-{
-	if (rcu_yield_active & RCU_YIELD_WRITE)
-		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-}
-
-static inline void rcu_debug_yield_init(void)
-{
-	URCU_TLS(rcu_rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
-}
-#else
-static inline void rcu_debug_yield_read(void)
-{
-}
-
-static inline void rcu_debug_yield_write(void)
-{
-}
-
-static inline void rcu_debug_yield_init(void)
-{
-
-}
-#endif
-
 /*
  * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use a
  * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
@@ -130,12 +78,18 @@ static inline void rcu_debug_yield_init(void)
  */
 extern void rcu_bp_register(void);
 
-/*
- * Global quiescent period counter with low-order bits unused.
- * Using a int rather than a char to eliminate false register dependencies
- * causing stalls on some architectures.
- */
-extern unsigned long rcu_gp_ctr;
+struct rcu_gp {
+	/*
+	 * Global grace period counter.
+	 * Contains the current RCU_GP_CTR_PHASE.
+	 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
+	 * Written to only by writer with mutex taken.
+	 * Read by both writer and readers.
+	 */
+	unsigned long ctr;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+extern struct rcu_gp rcu_gp;
 
 struct rcu_reader {
 	/* Data used by both reader and synchronize_rcu() */
@@ -166,13 +120,13 @@ static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
 	v = CMM_LOAD_SHARED(*ctr);
 	if (!(v & RCU_GP_CTR_NEST_MASK))
 		return RCU_READER_INACTIVE;
-	if (!((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE))
+	if (!((v ^ rcu_gp.ctr) & RCU_GP_CTR_PHASE))
 		return RCU_READER_ACTIVE_CURRENT;
 	return RCU_READER_ACTIVE_OLD;
 }
 
 /*
- * Helper for _rcu_read_lock(). The format of rcu_gp_ctr (as well as
+ * Helper for _rcu_read_lock(). The format of rcu_gp.ctr (as well as
  * the per-thread rcu_reader.ctr) has the upper bits containing a count of
  * _rcu_read_lock() nesting, and a lower-order bit that contains either zero
  * or RCU_GP_CTR_PHASE. The smp_mb_slave() ensures that the accesses in
@@ -181,7 +135,7 @@ static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
 static inline void _rcu_read_lock_update(unsigned long tmp)
 {
 	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-		_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
 		cmm_smp_mb();
 	} else
 		_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp + RCU_GP_COUNT);
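
Note (not part of the patch): below is a minimal standalone sketch of the reader-state classification that rcu_reader_state() performs against the new rcu_gp.ctr. The constant definitions here are assumptions chosen to match the layout described in the comments above (nesting count in the low-order bits, RCU_GP_CTR_PHASE above them); the real definitions live elsewhere in urcu-bp.h.

#include <stdio.h>

/* Assumed values, consistent with the layout described in the header comments. */
#define RCU_GP_COUNT		(1UL << 0)
#define RCU_GP_CTR_PHASE	(1UL << (sizeof(unsigned long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)

enum rcu_state {
	RCU_READER_ACTIVE_CURRENT,
	RCU_READER_ACTIVE_OLD,
	RCU_READER_INACTIVE,
};

/* Same classification logic as rcu_reader_state(), applied to plain values. */
static enum rcu_state classify(unsigned long reader_ctr, unsigned long gp_ctr)
{
	if (!(reader_ctr & RCU_GP_CTR_NEST_MASK))
		return RCU_READER_INACTIVE;		/* nesting count is zero */
	if (!((reader_ctr ^ gp_ctr) & RCU_GP_CTR_PHASE))
		return RCU_READER_ACTIVE_CURRENT;	/* reader saw the current phase */
	return RCU_READER_ACTIVE_OLD;			/* reader still holds the previous phase */
}

int main(void)
{
	/* Writer has flipped the phase once; the counter keeps RCU_GP_COUNT set. */
	unsigned long gp_ctr = RCU_GP_COUNT | RCU_GP_CTR_PHASE;

	printf("%d\n", classify(0UL, gp_ctr));		/* 2: inactive */
	printf("%d\n", classify(gp_ctr, gp_ctr));	/* 0: active, current phase */
	printf("%d\n", classify(RCU_GP_COUNT, gp_ctr));	/* 1: active, old phase */
	return 0;
}

The sketch also shows why the patch documents that rcu_gp.ctr "has a RCU_GP_COUNT of 1": when an outermost _rcu_read_lock() copies rcu_gp.ctr into the per-thread counter, the copied value already carries a non-zero nesting count, so the fast path needs no extra increment.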