diff --git a/urcu/static/urcu-bp.h b/urcu/static/urcu-bp.h
index 832ba0f..6e52d84 100644
--- a/urcu/static/urcu-bp.h
+++ b/urcu/static/urcu-bp.h
@@ -6,8 +6,8 @@
  *
  * Userspace RCU header.
  *
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
- * dynamically with the userspace rcu library.
+ * TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
+ * RELEASE. See urcu.h for linking dynamically with the userspace rcu library.
  *
  * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
@@ -38,6 +38,8 @@
 #include <urcu/system.h>
 #include <urcu/uatomic.h>
 #include <urcu/list.h>
+#include <urcu/tls-compat.h>
+#include <urcu/debug.h>
 
 /*
  * This code section can only be included in LGPL 2.1 compatible source code.
@@ -51,63 +53,11 @@ extern "C" {
 #endif
 
-#ifdef DEBUG_RCU
-#define rcu_assert(args...)	assert(args)
-#else
-#define rcu_assert(args...)
-#endif
-
-#ifdef DEBUG_YIELD
-#include <sched.h>
-#include <time.h>
-#include <pthread.h>
-#include <unistd.h>
-
-#define YIELD_READ	(1 << 0)
-#define YIELD_WRITE	(1 << 1)
-
-/*
- * Updates without RCU_MB are much slower. Account this in
- * the delay.
- */
-/* maximum sleep delay, in us */
-#define MAX_SLEEP 50
-
-extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
-
-static inline void debug_yield_read(void)
-{
-	if (yield_active & YIELD_READ)
-		if (rand_r(&rand_yield) & 0x1)
-			usleep(rand_r(&rand_yield) % MAX_SLEEP);
-}
-
-static inline void debug_yield_write(void)
-{
-	if (yield_active & YIELD_WRITE)
-		if (rand_r(&rand_yield) & 0x1)
-			usleep(rand_r(&rand_yield) % MAX_SLEEP);
-}
-
-static inline void debug_yield_init(void)
-{
-	rand_yield = time(NULL) ^ pthread_self();
-}
-#else
-static inline void debug_yield_read(void)
-{
-}
-
-static inline void debug_yield_write(void)
-{
-}
-
-static inline void debug_yield_init(void)
-{
-
-}
-#endif
+enum rcu_state {
+	RCU_READER_ACTIVE_CURRENT,
+	RCU_READER_ACTIVE_OLD,
+	RCU_READER_INACTIVE,
+};
 
 /*
  * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use a
@@ -123,16 +73,22 @@ static inline void debug_yield_init(void)
  */
 extern void rcu_bp_register(void);
 
-/*
- * Global quiescent period counter with low-order bits unused.
- * Using an int rather than a char to eliminate false register dependencies
- * causing stalls on some architectures.
- */
-extern long rcu_gp_ctr;
+struct rcu_gp {
+	/*
+	 * Global grace period counter.
+	 * Contains the current RCU_GP_CTR_PHASE.
+	 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
+	 * Written to only by writer with mutex taken.
+	 * Read by both writer and readers.
+	 */
+	unsigned long ctr;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+extern struct rcu_gp rcu_gp;
 
 struct rcu_reader {
 	/* Data used by both reader and synchronize_rcu() */
-	long ctr;
+	unsigned long ctr;
 	/* Data used for registry */
 	struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 	pthread_t tid;
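To make the counter layout concrete, here is a standalone sketch (illustration only, not part of the patch). The three #defines mirror the RCU_GP_COUNT, RCU_GP_CTR_PHASE and RCU_GP_CTR_NEST_MASK definitions this header relies on; the writer/reader interplay is a hypothetical single-threaded walkthrough:

	/* Standalone sketch of the urcu-bp counter layout. */
	#include <stdio.h>

	#define RCU_GP_COUNT		(1UL << 0)
	/* Phase bit at half the bits of a long, hence a multiple of 8. */
	#define RCU_GP_CTR_PHASE	(1UL << (sizeof(long) << 2))
	#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)

	int main(void)
	{
		/* Writer side: rcu_gp.ctr keeps a count of 1 for the fast path. */
		unsigned long gp_ctr = RCU_GP_COUNT;
		unsigned long reader_ctr;

		reader_ctr = gp_ctr;		/* outermost read lock: snapshot */
		reader_ctr += RCU_GP_COUNT;	/* nested read lock: bump count */
		printf("nesting: %lu\n", reader_ctr & RCU_GP_CTR_NEST_MASK); /* 2 */

		gp_ctr ^= RCU_GP_CTR_PHASE;	/* writer starts a new grace period */
		printf("old phase: %d\n",
			!!((reader_ctr ^ gp_ctr) & RCU_GP_CTR_PHASE)); /* 1 */
		return 0;
	}

The phase test on the last line is the same comparison rcu_reader_state() performs below: a reader whose snapshot differs from rcu_gp.ctr in RCU_GP_CTR_PHASE was active before the current grace period started.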
@@ -144,60 +100,106 @@ struct rcu_reader {
  * Adds a pointer dereference on the read-side, but won't require to unregister
  * the reader thread.
  */
-extern struct rcu_reader __thread *rcu_reader;
+extern DECLARE_URCU_TLS(struct rcu_reader *, rcu_reader);
+
+extern int urcu_bp_has_sys_membarrier;
+
+static inline void urcu_bp_smp_mb_slave(void)
+{
+	if (caa_likely(urcu_bp_has_sys_membarrier))
+		cmm_barrier();
+	else
+		cmm_smp_mb();
+}
 
-static inline int rcu_old_gp_ongoing(long *value)
+static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
 {
-	long v;
+	unsigned long v;
 
-	if (value == NULL)
-		return 0;
+	if (ctr == NULL)
+		return RCU_READER_INACTIVE;
 	/*
 	 * Make sure both tests below are done on the same version of *value
 	 * to ensure consistency.
 	 */
-	v = CMM_LOAD_SHARED(*value);
-	return (v & RCU_GP_CTR_NEST_MASK) &&
-		((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
+	v = CMM_LOAD_SHARED(*ctr);
+	if (!(v & RCU_GP_CTR_NEST_MASK))
+		return RCU_READER_INACTIVE;
+	if (!((v ^ rcu_gp.ctr) & RCU_GP_CTR_PHASE))
+		return RCU_READER_ACTIVE_CURRENT;
+	return RCU_READER_ACTIVE_OLD;
 }
 
-static inline void _rcu_read_lock(void)
+/*
+ * Helper for _rcu_read_lock(). The format of rcu_gp.ctr (as well as
+ * the per-thread rcu_reader.ctr) has the upper bits containing a count of
+ * _rcu_read_lock() nesting, and a lower-order bit that contains either zero
+ * or RCU_GP_CTR_PHASE. The smp_mb_slave() ensures that the accesses in
+ * _rcu_read_lock() happen before the subsequent read-side critical section.
+ */
+static inline void _rcu_read_lock_update(unsigned long tmp)
 {
-	long tmp;
+	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
+		urcu_bp_smp_mb_slave();
+	} else
+		_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp + RCU_GP_COUNT);
+}
 
-	/* Check if registered */
-	if (unlikely(!rcu_reader))
-		rcu_bp_register();
+/*
+ * Enter an RCU read-side critical section.
+ *
+ * The first cmm_barrier() call ensures that the compiler does not reorder
+ * the body of _rcu_read_lock() with a mutex.
+ *
+ * This function and its helper are both less than 10 lines long. The
+ * intent is that this function meets the 10-line criterion in LGPL,
+ * allowing this function to be invoked directly from non-LGPL code.
+ */
+static inline void _rcu_read_lock(void)
+{
+	unsigned long tmp;
 
+	if (caa_unlikely(!URCU_TLS(rcu_reader)))
+		rcu_bp_register(); /* If not yet registered. */
 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
-	tmp = rcu_reader->ctr;
-	/*
-	 * rcu_gp_ctr is
-	 *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
-	 */
-	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-		_CMM_STORE_SHARED(rcu_reader->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
-		/*
-		 * Set active readers count for outermost nesting level before
-		 * accessing the pointer.
-		 */
-		cmm_smp_mb();
-	} else {
-		_CMM_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
-	}
+	tmp = URCU_TLS(rcu_reader)->ctr;
+	urcu_assert((tmp & RCU_GP_CTR_NEST_MASK) != RCU_GP_CTR_NEST_MASK);
+	_rcu_read_lock_update(tmp);
 }
 
+/*
+ * Exit an RCU read-side critical section. This function is less than
+ * 10 lines of code, and is intended to be usable by non-LGPL code, as
+ * called out in LGPL.
+ */
 static inline void _rcu_read_unlock(void)
 {
-	/*
-	 * Finish using rcu before decrementing the pointer.
-	 */
-	cmm_smp_mb();
-	_CMM_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
+	unsigned long tmp;
+
+	tmp = URCU_TLS(rcu_reader)->ctr;
+	urcu_assert(tmp & RCU_GP_CTR_NEST_MASK);
+	/* Finish using rcu before decrementing the pointer. */
+	urcu_bp_smp_mb_slave();
+	_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp - RCU_GP_COUNT);
 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
 }
 
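For completeness, a minimal read-side usage sketch (hypothetical code, not part of the patch), assuming the public <urcu-bp.h> header and linking with -lurcu-bp; struct config, shared_config and read_config_value() are invented names:

	#define _LGPL_SOURCE	/* map the API onto the static inlines above */
	#include <urcu-bp.h>

	struct config {
		int value;
	};

	static struct config *shared_config;	/* RCU-protected pointer */

	int read_config_value(void)
	{
		struct config *cfg;
		int v = -1;

		/* No rcu_register_thread() needed: bp registers on first use. */
		rcu_read_lock();
		cfg = rcu_dereference(shared_config);
		if (cfg)
			v = cfg->value;
		rcu_read_unlock();
		return v;
	}

Without _LGPL_SOURCE the same calls go through the shared library instead, which is what the recompile-on-each-release notice at the top of this header refers to.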
-#ifdef __cplusplus
+/*
+ * Returns whether within an RCU read-side critical section.
+ *
+ * This function is less than 10 lines long. The intent is that this
+ * function meets the 10-line criterion for LGPL, allowing this function
+ * to be invoked directly from non-LGPL code.
+ */
+static inline int _rcu_read_ongoing(void)
+{
+	if (caa_unlikely(!URCU_TLS(rcu_reader)))
+		rcu_bp_register(); /* If not yet registered. */
+	return URCU_TLS(rcu_reader)->ctr & RCU_GP_CTR_NEST_MASK;
+}
+
+#ifdef __cplusplus
 }
 #endif
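Finally, a sketch of how _rcu_read_ongoing() is typically consumed through its public wrapper rcu_read_ongoing(), here asserting that a lookup helper runs under the read lock (struct node and find_key() are hypothetical):

	#define _LGPL_SOURCE
	#include <urcu-bp.h>
	#include <assert.h>
	#include <stddef.h>

	struct node {
		struct node *next;
		int key;
	};

	int find_key(struct node **headp, int key)
	{
		struct node *n;

		/* Nonzero nesting count: we are inside a read-side section. */
		assert(rcu_read_ongoing());
		for (n = rcu_dereference(*headp); n != NULL;
				n = rcu_dereference(n->next))
			if (n->key == key)
				return 1;
		return 0;
	}

A caller wraps it as rcu_read_lock(); found = find_key(&head, 42); rcu_read_unlock();. Since the nesting count is what is tested, the assertion also holds inside nested read-side sections.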