X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Furcu.h;h=6c8b92602234b97d27a4cb462ec0ec06c56a62c7;hp=c517693ff1cf187982882bf30f412e454ae8d26f;hb=15302e2854ad9f1377ec81e331c1bec7a54a5621;hpb=a5a9f428a238e790d6c97299bc214b5cca815cd7

diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index c517693..6c8b926 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -96,6 +96,12 @@ extern "C" {
 #define SIGRCU SIGUSR1
 #endif
 
+enum rcu_state {
+	RCU_READER_ACTIVE_CURRENT,
+	RCU_READER_ACTIVE_OLD,
+	RCU_READER_INACTIVE,
+};
+
 #ifdef DEBUG_RCU
 #define rcu_assert(args...)	assert(args)
 #else
@@ -108,8 +114,8 @@ extern "C" {
 #include <pthread.h>
 #include <unistd.h>
 
-#define YIELD_READ	(1 << 0)
-#define YIELD_WRITE	(1 << 1)
+#define RCU_YIELD_READ	(1 << 0)
+#define RCU_YIELD_WRITE	(1 << 1)
 
 /*
  * Updates with RCU_SIGNAL are much slower. Account this in the delay.
@@ -121,37 +127,37 @@ extern "C" {
 #define MAX_SLEEP 50
 #endif
 
-extern unsigned int yield_active;
-extern DECLARE_URCU_TLS(unsigned int, rand_yield);
+extern unsigned int rcu_yield_active;
+extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);
 
-static inline void debug_yield_read(void)
+static inline void rcu_debug_yield_read(void)
 {
-	if (yield_active & YIELD_READ)
-		if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
+	if (rcu_yield_active & RCU_YIELD_READ)
+		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
+			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
 }
 
-static inline void debug_yield_write(void)
+static inline void rcu_debug_yield_write(void)
 {
-	if (yield_active & YIELD_WRITE)
-		if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
-			usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
+	if (rcu_yield_active & RCU_YIELD_WRITE)
+		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
+			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
 }
 
-static inline void debug_yield_init(void)
+static inline void rcu_debug_yield_init(void)
 {
-	URCU_TLS(rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
+	URCU_TLS(rcu_rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
 }
 #else
-static inline void debug_yield_read(void)
+static inline void rcu_debug_yield_read(void)
 {
 }
 
-static inline void debug_yield_write(void)
+static inline void rcu_debug_yield_write(void)
 {
 }
 
-static inline void debug_yield_init(void)
+static inline void rcu_debug_yield_init(void)
 {
 }
 
@@ -173,11 +179,11 @@ static inline void debug_yield_init(void)
 #define RCU_MB_GROUP	MB_GROUP_ALL
 
 #ifdef RCU_MEMBARRIER
-extern int has_sys_membarrier;
+extern int rcu_has_sys_membarrier;
 
 static inline void smp_mb_slave(int group)
 {
-	if (caa_likely(has_sys_membarrier))
+	if (caa_likely(rcu_has_sys_membarrier))
 		cmm_barrier();
 	else
 		cmm_smp_mb();
@@ -225,21 +231,21 @@ struct rcu_reader {
 
 extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
 
-extern int32_t gp_futex;
+extern int32_t rcu_gp_futex;
 
 /*
  * Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
  */
 static inline void wake_up_gp(void)
 {
-	if (caa_unlikely(uatomic_read(&gp_futex) == -1)) {
-		uatomic_set(&gp_futex, 0);
-		futex_async(&gp_futex, FUTEX_WAKE, 1,
+	if (caa_unlikely(uatomic_read(&rcu_gp_futex) == -1)) {
+		uatomic_set(&rcu_gp_futex, 0);
+		futex_async(&rcu_gp_futex, FUTEX_WAKE, 1,
 			    NULL, NULL, 0);
 	}
 }
 
-static inline int rcu_gp_ongoing(unsigned long *ctr)
+static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
 {
 	unsigned long v;
 
@@ -248,8 +254,11 @@ static inline int rcu_gp_ongoing(unsigned long *ctr)
 	 * to insure consistency.
 	 */
 	v = CMM_LOAD_SHARED(*ctr);
-	return (v & RCU_GP_CTR_NEST_MASK) &&
-		 ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
+	if (!(v & RCU_GP_CTR_NEST_MASK))
+		return RCU_READER_INACTIVE;
+	if (!((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE))
+		return RCU_READER_ACTIVE_CURRENT;
+	return RCU_READER_ACTIVE_OLD;
 }
 
 /*
@@ -320,6 +329,18 @@ static inline void _rcu_read_unlock(void)
 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
 }
 
+/*
+ * Returns whether within a RCU read-side critical section.
+ *
+ * This function is less than 10 lines long. The intent is that this
+ * function meets the 10-line criterion for LGPL, allowing this function
+ * to be invoked directly from non-LGPL code.
+ */
+static inline int _rcu_read_ongoing(void)
+{
+	return URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK;
+}
+
 #ifdef __cplusplus
 }
 #endif
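
Note on the rcu_gp_ongoing() -> rcu_reader_state() change above: each reader's per-thread counter packs a nesting count in its low-order bits and a grace-period phase bit, and the new enum rcu_state distinguishes an idle reader from one reading in the current phase and from one still reading in the old phase (only the last must be waited for before the grace period can complete). The standalone sketch below models that classification; the constants and helper names (GP_CTR_PHASE, GP_CTR_NEST_MASK, reader_state()) are illustrative stand-ins, not the library's definitions.

/*
 * Illustrative model of the tri-state reader classification introduced
 * above. Constants and names are hypothetical stand-ins for
 * RCU_GP_CTR_PHASE, RCU_GP_CTR_NEST_MASK and rcu_reader_state().
 */
#include <stdio.h>

#define GP_CTR_PHASE		(1UL << 16)		/* grace-period phase bit */
#define GP_CTR_NEST_MASK	(GP_CTR_PHASE - 1)	/* low bits: nesting count */

enum reader_state {
	READER_ACTIVE_CURRENT,	/* reading, same phase as the global counter */
	READER_ACTIVE_OLD,	/* reading, started before the phase flip */
	READER_INACTIVE,	/* not within a read-side critical section */
};

static enum reader_state reader_state(unsigned long reader_ctr,
		unsigned long global_ctr)
{
	if (!(reader_ctr & GP_CTR_NEST_MASK))
		return READER_INACTIVE;
	if (!((reader_ctr ^ global_ctr) & GP_CTR_PHASE))
		return READER_ACTIVE_CURRENT;
	return READER_ACTIVE_OLD;	/* the updater must wait for this one */
}

int main(void)
{
	unsigned long global_ctr = GP_CTR_PHASE;	/* phase already flipped */

	/* Idle reader: nesting count is zero. */
	printf("%d\n", reader_state(0UL, global_ctr));
	/* Reader that entered before the flip: still in the old phase. */
	printf("%d\n", reader_state(1UL, global_ctr));
	/* Reader that entered after the flip: current phase, no need to wait. */
	printf("%d\n", reader_state(GP_CTR_PHASE + 1UL, global_ctr));
	return 0;
}

The added _rcu_read_ongoing() helper tests the same nesting bits of the caller's own counter, so a thread can check whether it is currently inside a read-side critical section, presumably through whatever public wrapper the flavor exposes (e.g. rcu_read_ongoing()).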