X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Furcu-qsbr.h;h=8d5fd03107373b5cee0d51273624da5c05552bdb;hp=ea5e13adebd5fae0374a5ad81693dd6cf46436c2;hb=67ecffc0f530a7b5c4dd5111ea7dd3213da8eb91;hpb=882f335739b978d1c55be2faeed077f315afe5d7

diff --git a/urcu/static/urcu-qsbr.h b/urcu/static/urcu-qsbr.h
index ea5e13a..8d5fd03 100644
--- a/urcu/static/urcu-qsbr.h
+++ b/urcu/static/urcu-qsbr.h
@@ -31,7 +31,6 @@
 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -43,10 +42,11 @@
 #include 
 #include 
 #include 
+#include 
 
 #ifdef __cplusplus
 extern "C" {
-#endif 
+#endif
 
 /*
  * This code section can only be included in LGPL 2.1 compatible source code.
@@ -56,75 +56,27 @@ extern "C" {
  * This is required to permit relinking with newer versions of the library.
  */
 
-#ifdef DEBUG_RCU
-#define rcu_assert(args...)	assert(args)
-#else
-#define rcu_assert(args...)
-#endif
-
 enum rcu_state {
 	RCU_READER_ACTIVE_CURRENT,
 	RCU_READER_ACTIVE_OLD,
 	RCU_READER_INACTIVE,
 };
 
-#ifdef DEBUG_YIELD
-#include 
-#include 
-#include 
-#include 
-
-#define RCU_YIELD_READ	(1 << 0)
-#define RCU_YIELD_WRITE	(1 << 1)
-
-/* maximum sleep delay, in us */
-#define MAX_SLEEP 50
-
-extern unsigned int rcu_yield_active;
-extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);
-
-static inline void rcu_debug_yield_read(void)
-{
-	if (rcu_yield_active & RCU_YIELD_READ)
-		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-}
-
-static inline void rcu_debug_yield_write(void)
-{
-	if (rcu_yield_active & RCU_YIELD_WRITE)
-		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-}
-
-static inline void rcu_debug_yield_init(void)
-{
-	URCU_TLS(rcu_rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
-}
-#else
-static inline void rcu_debug_yield_read(void)
-{
-}
-
-static inline void rcu_debug_yield_write(void)
-{
-}
-
-static inline void rcu_debug_yield_init(void)
-{
-
-}
-#endif
-
 #define RCU_GP_ONLINE		(1UL << 0)
 #define RCU_GP_CTR		(1UL << 1)
 
-/*
- * Global quiescent period counter with low-order bits unused.
- * Using a int rather than a char to eliminate false register dependencies
- * causing stalls on some architectures.
- */
-extern unsigned long rcu_gp_ctr;
+struct rcu_gp {
+	/*
+	 * Global quiescent period counter with low-order bits unused.
+	 * Using a int rather than a char to eliminate false register
+	 * dependencies causing stalls on some architectures.
+	 */
+	unsigned long ctr;
+
+	int32_t futex;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+extern struct rcu_gp rcu_gp;
 
 struct rcu_reader {
 	/* Data used by both reader and synchronize_rcu() */
@@ -133,12 +85,12 @@ struct rcu_reader {
 	struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 	int waiting;
 	pthread_t tid;
+	/* Reader registered flag, for internal checks. */
+	unsigned int registered:1;
 };
 
 extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
 
-extern int32_t rcu_gp_futex;
-
 /*
  * Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
  */
@@ -147,11 +99,16 @@ static inline void wake_up_gp(void)
 	if (caa_unlikely(_CMM_LOAD_SHARED(URCU_TLS(rcu_reader).waiting))) {
 		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).waiting, 0);
 		cmm_smp_mb();
-		if (uatomic_read(&rcu_gp_futex) != -1)
+		if (uatomic_read(&rcu_gp.futex) != -1)
 			return;
-		uatomic_set(&rcu_gp_futex, 0);
-		futex_noasync(&rcu_gp_futex, FUTEX_WAKE, 1,
-			NULL, NULL, 0);
+		uatomic_set(&rcu_gp.futex, 0);
+		/*
+		 * Ignoring return value until we can make this function
+		 * return something (because urcu_die() is not publicly
+		 * exposed).
+		 */
+		(void) futex_noasync(&rcu_gp.futex, FUTEX_WAKE, 1,
+			NULL, NULL, 0);
 	}
 }
 
@@ -162,7 +119,7 @@ static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
 	v = CMM_LOAD_SHARED(*ctr);
 	if (!v)
 		return RCU_READER_INACTIVE;
-	if (v == rcu_gp_ctr)
+	if (v == rcu_gp.ctr)
 		return RCU_READER_ACTIVE_CURRENT;
 	return RCU_READER_ACTIVE_OLD;
 }
@@ -176,7 +133,7 @@ static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
  */
 static inline void _rcu_read_lock(void)
 {
-	rcu_assert(URCU_TLS(rcu_reader).ctr);
+	urcu_assert(URCU_TLS(rcu_reader).ctr);
 }
 
 /*
@@ -188,6 +145,7 @@ static inline void _rcu_read_lock(void)
  */
 static inline void _rcu_read_unlock(void)
 {
+	urcu_assert(URCU_TLS(rcu_reader).ctr);
 }
 
 /*
@@ -228,7 +186,7 @@ static inline void _rcu_quiescent_state_update_and_wakeup(unsigned long gp_ctr)
  * to be invoked directly from non-LGPL code.
  *
  * We skip the memory barriers and gp store if our local ctr already
- * matches the global rcu_gp_ctr value: this is OK because a prior
+ * matches the global rcu_gp.ctr value: this is OK because a prior
  * _rcu_quiescent_state() or _rcu_thread_online() already updated it
  * within our thread, so we have no quiescent state to report.
  */
@@ -236,7 +194,8 @@ static inline void _rcu_quiescent_state(void)
 {
 	unsigned long gp_ctr;
 
-	if ((gp_ctr = CMM_LOAD_SHARED(rcu_gp_ctr)) == URCU_TLS(rcu_reader).ctr)
+	urcu_assert(URCU_TLS(rcu_reader).registered);
+	if ((gp_ctr = CMM_LOAD_SHARED(rcu_gp.ctr)) == URCU_TLS(rcu_reader).ctr)
 		return;
 	_rcu_quiescent_state_update_and_wakeup(gp_ctr);
 }
@@ -251,6 +210,7 @@ static inline void _rcu_quiescent_state(void)
  */
 static inline void _rcu_thread_offline(void)
 {
+	urcu_assert(URCU_TLS(rcu_reader).registered);
 	cmm_smp_mb();
 	CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, 0);
 	cmm_smp_mb();	/* write URCU_TLS(rcu_reader).ctr before read futex */
@@ -268,12 +228,13 @@ static inline void _rcu_thread_online(void)
  */
 static inline void _rcu_thread_online(void)
 {
+	urcu_assert(URCU_TLS(rcu_reader).registered);
 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
-	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
+	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, CMM_LOAD_SHARED(rcu_gp.ctr));
 	cmm_smp_mb();
 }
 
-#ifdef __cplusplus 
+#ifdef __cplusplus
 }
 #endif
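For context, not part of the patch above: the _rcu_* fast paths touched here back the public QSBR API, in which every reader thread must register and periodically announce quiescent states. The following is a minimal usage sketch of that flavor under a few assumptions: the classic public header <urcu-qsbr.h> and -lurcu-qsbr of that era, _LGPL_SOURCE to pull in the static inlines shown in the diff, a single updater, and hypothetical names (struct config, shared_config, reader_thread, update_config) introduced only for illustration.

/* Hypothetical QSBR usage sketch; not taken from the patch above. */
#define _LGPL_SOURCE		/* map rcu_read_lock() etc. to the static inline fast paths */
#include <pthread.h>
#include <stdlib.h>
#include <urcu-qsbr.h>

struct config {
	int value;
};

static struct config *shared_config;	/* RCU-protected pointer */

static void *reader_thread(void *arg)
{
	(void) arg;

	rcu_register_thread();	/* sets rcu_reader.registered and puts the thread online */

	for (int i = 0; i < 100000; i++) {
		struct config *c;

		rcu_read_lock();	/* QSBR: essentially free, only the urcu_assert() check */
		c = rcu_dereference(shared_config);
		if (c)
			(void) c->value;	/* use the snapshot */
		rcu_read_unlock();

		/* Outside any read-side critical section: report a quiescent state. */
		rcu_quiescent_state();
	}

	rcu_thread_offline();	/* about to block for a long time (I/O, sleep, ...) */
	/* ... blocking work ... */
	rcu_thread_online();

	rcu_unregister_thread();
	return NULL;
}

/* Single updater: publish a new version, wait for a grace period, reclaim. */
static void update_config(int value)
{
	struct config *new_c, *old_c;

	new_c = malloc(sizeof(*new_c));
	if (!new_c)
		return;
	new_c->value = value;
	old_c = shared_config;
	rcu_assign_pointer(shared_config, new_c);
	synchronize_rcu();	/* waits until every registered reader passes a quiescent state */
	free(old_c);
}

int main(void)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, reader_thread, NULL))
		return 1;
	update_config(42);
	pthread_join(tid, NULL);
	free(shared_config);
	return 0;
}

The sketch also shows why the new urcu_assert(URCU_TLS(rcu_reader).registered) checks sit where they do: QSBR keeps the read-side primitives nearly free of barriers and shifts the cost to the explicit rcu_quiescent_state()/rcu_thread_offline()/rcu_thread_online() calls, which are exactly the entry points that only make sense for a registered reader.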