X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Furcu.h;h=fbba46cde9afdc6bff0c4b337a22afc78d40fea7;hp=33545b71f68c1be9dc25ef78a59a9a052d16aca5;hb=a77f7d8228bcf1259f8c4121ce02d4763424d45a;hpb=e1c0b55cbcc602bdff8b9778d911c35515f48cc3

diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index 33545b7..fbba46c 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -42,6 +42,7 @@
 #include <urcu/list.h>
 #include <urcu/futex.h>
 #include <urcu/tls-compat.h>
+#include <urcu/debug.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -79,67 +80,6 @@ enum rcu_state {
 	RCU_READER_INACTIVE,
 };
 
-#ifdef DEBUG_RCU
-#define rcu_assert(args...)	assert(args)
-#else
-#define rcu_assert(args...)
-#endif
-
-#ifdef DEBUG_YIELD
-#include <sched.h>
-#include <time.h>
-#include <pthread.h>
-#include <unistd.h>
-
-#define RCU_YIELD_READ	(1 << 0)
-#define RCU_YIELD_WRITE	(1 << 1)
-
-/*
- * Updates with RCU_SIGNAL are much slower. Account this in the delay.
- */
-#ifdef RCU_SIGNAL
-/* maximum sleep delay, in us */
-#define MAX_SLEEP 30000
-#else
-#define MAX_SLEEP 50
-#endif
-
-extern unsigned int rcu_yield_active;
-extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);
-
-static inline void rcu_debug_yield_read(void)
-{
-	if (rcu_yield_active & RCU_YIELD_READ)
-		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-}
-
-static inline void rcu_debug_yield_write(void)
-{
-	if (rcu_yield_active & RCU_YIELD_WRITE)
-		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-}
-
-static inline void rcu_debug_yield_init(void)
-{
-	URCU_TLS(rcu_rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
-}
-#else
-static inline void rcu_debug_yield_read(void)
-{
-}
-
-static inline void rcu_debug_yield_write(void)
-{
-}
-
-static inline void rcu_debug_yield_init(void)
-{
-
-}
-#endif
-
 /*
  * RCU memory barrier broadcast group. Currently, only broadcast to all process
  * threads is supported (group 0).
@@ -212,6 +152,8 @@ struct rcu_reader {
 	/* Data used for registry */
 	struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 	pthread_t tid;
+	/* Reader registered flag, for internal checks. */
+	unsigned int registered:1;
 };
 
 extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
@@ -223,8 +165,13 @@ static inline void wake_up_gp(void)
 {
 	if (caa_unlikely(uatomic_read(&rcu_gp.futex) == -1)) {
 		uatomic_set(&rcu_gp.futex, 0);
-		futex_async(&rcu_gp.futex, FUTEX_WAKE, 1,
-				NULL, NULL, 0);
+		/*
+		 * Ignoring return value until we can make this function
+		 * return something (because urcu_die() is not publicly
+		 * exposed).
+		 */
+		(void) futex_async(&rcu_gp.futex, FUTEX_WAKE, 1,
+				NULL, NULL, 0);
 	}
 }
 
@@ -274,6 +221,7 @@ static inline void _rcu_read_lock(void)
 {
 	unsigned long tmp;
 
+	urcu_assert(URCU_TLS(rcu_reader).registered);
 	cmm_barrier();
 	tmp = URCU_TLS(rcu_reader).ctr;
 	_rcu_read_lock_update(tmp);
@@ -307,6 +255,7 @@ static inline void _rcu_read_unlock(void)
 {
 	unsigned long tmp;
 
+	urcu_assert(URCU_TLS(rcu_reader).registered);
 	tmp = URCU_TLS(rcu_reader).ctr;
 	_rcu_read_unlock_update_and_wakeup(tmp);
 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
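
For context, the new `registered` bit in struct rcu_reader and the `urcu_assert(URCU_TLS(rcu_reader).registered)` checks in `_rcu_read_lock()`/`_rcu_read_unlock()` exist to catch threads that enter a read-side critical section without having registered with the RCU library. The sketch below is an illustrative reader using the public liburcu API, not part of this patch; it assumes the flag is set and cleared by `rcu_register_thread()`/`rcu_unregister_thread()` in urcu.c (outside this diff), and `shared_cfg`/`struct config` are hypothetical names. With debug assertions enabled, forgetting the registration call now trips the assert instead of failing silently.

/* Illustrative reader thread sketch (assumed example, not from the patch). */
#include <urcu.h>

struct config {
	int value;
};

static struct config *shared_cfg;	/* hypothetical RCU-protected pointer */

static void *reader_thread(void *arg)
{
	struct config *cfg;

	rcu_register_thread();		/* registers this thread as an RCU reader */

	rcu_read_lock();		/* debug builds assert the thread is registered */
	cfg = rcu_dereference(shared_cfg);
	if (cfg)
		(void) cfg->value;	/* use the snapshot only inside the critical section */
	rcu_read_unlock();

	rcu_unregister_thread();	/* reader must unregister before exiting */
	return NULL;
}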