X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Furcu.h;h=f27f8b6b7a151fdbe3ddfbb78719a11fbb51e168;hp=777fa4c593b17ee2625c23c143e6ee668e8f9e40;hb=bd252a04bbbb163aa4d8864b1e1e5a3a4d9d0892;hpb=6d841bc23fc94345fe76651d73c1a3f821a85aa7

diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index 777fa4c..f27f8b6 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -31,7 +31,6 @@
 
 #include
 #include
-#include
 #include
 #include
 
@@ -41,6 +40,7 @@
 #include
 #include
 #include
+#include <urcu/tls-compat.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -51,9 +51,17 @@ extern "C" {
 #define RCU_MEMBARRIER
 #endif
 
+/*
+ * RCU_MEMBARRIER is only possibly available on Linux. Fallback to RCU_MB
+ * otherwise.
+ */
+#if !defined(__linux__) && defined(RCU_MEMBARRIER)
+#undef RCU_MEMBARRIER
+#define RCU_MB
+#endif
+
 #ifdef RCU_MEMBARRIER
-#include
-#include
+#include
 
 /* If the headers do not support SYS_membarrier, statically use RCU_MB */
 #ifdef SYS_membarrier
@@ -88,18 +96,6 @@
 #define SIGRCU SIGUSR1
 #endif
 
-/*
- * If a reader is really non-cooperative and refuses to commit its
- * rcu_active_readers count to memory (there is no barrier in the reader
- * per-se), kick it after a few loops waiting for it.
- */
-#define KICK_READER_LOOPS 10000
-
-/*
- * Active attempts to check for reader Q.S. before calling futex().
- */
-#define RCU_QS_ACTIVE_ATTEMPTS 100
-
 #ifdef DEBUG_RCU
 #define rcu_assert(args...)	assert(args)
 #else
@@ -126,25 +122,25 @@
 #endif
 
 extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
+extern DECLARE_URCU_TLS(unsigned int, rand_yield);
 
 static inline void debug_yield_read(void)
 {
 	if (yield_active & YIELD_READ)
-		if (rand_r(&rand_yield) & 0x1)
-			usleep(rand_r(&rand_yield) % MAX_SLEEP);
+		if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+			usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
 }
 
 static inline void debug_yield_write(void)
 {
 	if (yield_active & YIELD_WRITE)
-		if (rand_r(&rand_yield) & 0x1)
-			usleep(rand_r(&rand_yield) % MAX_SLEEP);
+		if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+			usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
 }
 
 static inline void debug_yield_init(void)
 {
-	rand_yield = time(NULL) ^ pthread_self();
+	URCU_TLS(rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
 }
 #else
 static inline void debug_yield_read(void)
@@ -181,7 +177,7 @@ extern int has_sys_membarrier;
 
 static inline void smp_mb_slave(int group)
 {
-	if (likely(has_sys_membarrier))
+	if (caa_likely(has_sys_membarrier))
 		cmm_barrier();
 	else
 		cmm_smp_mb();
@@ -227,7 +223,7 @@ struct rcu_reader {
 	pthread_t tid;
 };
 
-extern struct rcu_reader __thread rcu_reader;
+extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
 
 extern int32_t gp_futex;
 
@@ -236,7 +232,7 @@ extern int32_t gp_futex;
  */
 static inline void wake_up_gp(void)
 {
-	if (unlikely(uatomic_read(&gp_futex) == -1)) {
+	if (caa_unlikely(uatomic_read(&gp_futex) == -1)) {
 		uatomic_set(&gp_futex, 0);
 		futex_async(&gp_futex, FUTEX_WAKE, 1,
 		      NULL, NULL, 0);
@@ -261,20 +257,20 @@ static inline void _rcu_read_lock(void)
 	unsigned long tmp;
 
 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
-	tmp = rcu_reader.ctr;
+	tmp = URCU_TLS(rcu_reader).ctr;
 	/*
 	 * rcu_gp_ctr is
 	 *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
 	 */
-	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-		_CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
 		/*
 		 * Set active readers count for outermost nesting level before
		 * accessing the pointer. See smp_mb_master().
		 */
 		smp_mb_slave(RCU_MB_GROUP);
 	} else {
-		_CMM_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
 	}
 }
 
@@ -282,19 +278,19 @@ static inline void _rcu_read_unlock(void)
 {
 	unsigned long tmp;
 
-	tmp = rcu_reader.ctr;
+	tmp = URCU_TLS(rcu_reader).ctr;
 	/*
 	 * Finish using rcu before decrementing the pointer.
 	 * See smp_mb_master().
 	 */
-	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
+	if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
 		smp_mb_slave(RCU_MB_GROUP);
-		_CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
-		/* write rcu_reader.ctr before read futex */
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
+		/* write URCU_TLS(rcu_reader).ctr before read futex */
 		smp_mb_slave(RCU_MB_GROUP);
 		wake_up_gp();
 	} else {
-		_CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
 	}
 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
 }
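The recurring change in this patch replaces every direct access to a __thread
variable (rcu_reader.ctr, rand_yield) with the URCU_TLS() accessor declared
through DECLARE_URCU_TLS() in urcu/tls-compat.h, so the same read-side fast
path can be built on toolchains that lack compiler thread-local storage. The
sketch below illustrates the general pattern behind such a declare/define/
access triplet. It is a hypothetical illustration, not the verbatim contents
of urcu/tls-compat.h: the SKETCH_* names and the HAVE_COMPILER_TLS switch are
invented for this example, and the pthread-key fallback is only one plausible
way the dispatch could be implemented.

/*
 * tls-compat-sketch.h: hypothetical illustration of a TLS compatibility
 * layer in the style of DECLARE_URCU_TLS()/URCU_TLS(). Not the real
 * urcu/tls-compat.h.
 */
#include <pthread.h>
#include <stdlib.h>

#ifdef HAVE_COMPILER_TLS	/* toolchain provides __thread */

# define SKETCH_DECLARE_TLS(type, name)	extern __thread type name
# define SKETCH_DEFINE_TLS(type, name)	__thread type name
# define SKETCH_TLS(name)		(name)

#else	/* no __thread: dispatch through a per-variable pthread key */

# define SKETCH_DECLARE_TLS(type, name)	type *__sketch_tls_ ## name(void)
# define SKETCH_DEFINE_TLS(type, name)					\
	static pthread_key_t __sketch_key_ ## name;			\
	static pthread_once_t __sketch_once_ ## name = PTHREAD_ONCE_INIT; \
	static void __sketch_init_ ## name(void)			\
	{								\
		(void) pthread_key_create(&__sketch_key_ ## name, free); \
	}								\
	type *__sketch_tls_ ## name(void)				\
	{								\
		type *v;						\
									\
		(void) pthread_once(&__sketch_once_ ## name,		\
				__sketch_init_ ## name);		\
		v = pthread_getspecific(__sketch_key_ ## name);		\
		if (!v) {						\
			/* First access from this thread: allocate slot. */ \
			v = calloc(1, sizeof(*v));			\
			(void) pthread_setspecific(__sketch_key_ ## name, v); \
		}							\
		return v;						\
	}
# define SKETCH_TLS(name)		(*__sketch_tls_ ## name())

#endif

/* Usage mirroring the patch above. */
SKETCH_DEFINE_TLS(unsigned int, rand_yield);

static void seed_yield(void)
{
	/* Expands to rand_yield = 42, or *__sketch_tls_rand_yield() = 42. */
	SKETCH_TLS(rand_yield) = 42;
}

With the compiler-TLS branch selected, SKETCH_TLS() compiles down to a plain
thread-local load, which is why hot paths such as _rcu_read_lock() and
_rcu_read_unlock() can absorb the macro indirection at no cost; only the
fallback pays one function call per access.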