X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.h;h=92b31df603d157ffa942534af1d5731d912c2342;hp=444d9cdba3346d987070dfa8fc68b78df4fe923f;hb=b715b99eb0d52e0744bfea46c62849f260a5ec58;hpb=6e32665beaad7fef48e9ab3c8c4cda2e9c232dde

diff --git a/urcu.h b/urcu.h
index 444d9cd..92b31df 100644
--- a/urcu.h
+++ b/urcu.h
@@ -31,6 +31,19 @@
 #define rmb()	asm volatile("lfence":::"memory")
 #define wmb()	asm volatile("sfence" ::: "memory")
 
+/* Assume SMP machine, given we don't have this information */
+#define CONFIG_SMP 1
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#endif
+
 static inline void atomic_inc(int *v)
 {
 	asm volatile("lock; incl %0"
@@ -121,10 +134,19 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 #include <sched.h>
 #include <time.h>
 #include <pthread.h>
+#include <unistd.h>
 
 #define YIELD_READ	(1 << 0)
 #define YIELD_WRITE	(1 << 1)
 
+/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
 extern unsigned int yield_active;
 extern unsigned int __thread rand_yield;
 
@@ -132,14 +154,14 @@ static inline void debug_yield_read(void)
 {
 	if (yield_active & YIELD_READ)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_write(void)
 {
 	if (yield_active & YIELD_WRITE)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_init(void)
@@ -161,6 +183,18 @@
 }
 #endif
 
+#ifdef DEBUG_FULL_MB
+static inline void read_barrier()
+{
+	smp_mb();
+}
+#else
+static inline void read_barrier()
+{
+	barrier();
+}
+#endif
+
 /*
  * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
  * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
@@ -179,17 +213,21 @@ extern long urcu_gp_ctr;
 
 extern long __thread urcu_active_readers;
 
-static inline int rcu_old_gp_ongoing(int *value)
+static inline int rcu_old_gp_ongoing(long *value)
 {
 	long v;
 
 	if (value == NULL)
 		return 0;
 	debug_yield_write();
+	/*
+	 * Make sure both tests below are done on the same version of *value
+	 * to insure consistency.
+	 */
 	v = ACCESS_ONCE(*value);
 	debug_yield_write();
 	return (v & RCU_GP_CTR_NEST_MASK) &&
-		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
+		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 }
 
 static inline void rcu_read_lock(void)
@@ -199,8 +237,9 @@
 	debug_yield_read();
 	tmp = urcu_active_readers;
 	debug_yield_read();
+	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
 	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
-		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
+		urcu_active_readers = urcu_gp_ctr;
 	else
 		urcu_active_readers = tmp + RCU_GP_COUNT;
 	debug_yield_read();
@@ -208,14 +247,14 @@
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	barrier();
+	read_barrier();
 	debug_yield_read();
 }
 
 static inline void rcu_read_unlock(void)
 {
 	debug_yield_read();
-	barrier();
+	read_barrier();
 	debug_yield_read();
 	/*
 	 * Finish using rcu before decrementing the pointer.
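Usage note (editorial, not part of the patch): after this change, the outermost rcu_read_lock() copies urcu_gp_ctr verbatim into urcu_active_readers, since the global counter now always carries RCU_GP_COUNT in its low-order bits (see the comment added in the hunk above); nested calls merely add RCU_GP_COUNT to the nesting count. A minimal reader-side sketch follows. The struct config, shared_config and read_threshold() names are hypothetical stand-ins for data published by a writer; only rcu_read_lock(), rcu_read_unlock() and ACCESS_ONCE() come from urcu.h.

#include "urcu.h"

struct config {
	int threshold;
};

/* Hypothetical shared pointer, assumed published elsewhere by a writer. */
static struct config *shared_config;

static int read_threshold(void)
{
	struct config *cfg;
	int val;

	rcu_read_lock();	/* outermost: snapshots urcu_gp_ctr into urcu_active_readers */
	cfg = ACCESS_ONCE(shared_config);
	val = cfg ? cfg->threshold : -1;

	rcu_read_lock();	/* nested: only adds RCU_GP_COUNT to the nesting count */
	rcu_read_unlock();

	rcu_read_unlock();	/* outermost unlock lets a pending grace period complete */
	return val;
}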