X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=urcu.h;h=3e1ad78324cda478711e1c3f521bd62359bd2212;hb=82faadb56d6001ee1dc0bc5b3f22de49cd0ec2fe;hp=a32fb94247fcfa11e4f5330d5e278821ab3a1e04;hpb=69a757c902c0cfb6245da18339b297b9ca6f94e0;p=urcu.git

diff --git a/urcu.h b/urcu.h
index a32fb94..3e1ad78 100644
--- a/urcu.h
+++ b/urcu.h
@@ -17,7 +17,6 @@
  * Distributed under GPLv2
  */
 
-#define __USE_GNU
 #include 
 #include 
 
@@ -27,10 +26,36 @@
 #define likely(x)	__builtin_expect(!!(x), 1)
 #define unlikely(x)	__builtin_expect(!!(x), 0)
 
+/* Assume P4 or newer */
+#define CONFIG_HAS_FENCE 1
+
 /* x86 32/64 specific */
+#ifdef CONFIG_HAS_FENCE
 #define mb() asm volatile("mfence":::"memory")
 #define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
+#define wmb() asm volatile("sfence"::: "memory")
+#else
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
+#endif
+
+/* Assume SMP machine, given we don't have this information */
+#define CONFIG_SMP 1
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
 
 static inline void atomic_inc(int *v)
 {
@@ -118,14 +143,30 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 
 #define SIGURCU SIGUSR1
 
+/*
+ * If a reader is really non-cooperative and refuses to commit its
+ * urcu_active_readers count to memory (there is no barrier in the reader
+ * per-se), kick it after a few loops waiting for it.
+ */
+#define KICK_READER_LOOPS 10000
+
 #ifdef DEBUG_YIELD
 #include 
 #include 
 #include 
+#include 
 
 #define YIELD_READ (1 << 0)
 #define YIELD_WRITE (1 << 1)
 
+/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
 extern unsigned int yield_active;
 extern unsigned int __thread rand_yield;
 
@@ -133,14 +174,14 @@ static inline void debug_yield_read(void)
 {
 	if (yield_active & YIELD_READ)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_write(void)
 {
 	if (yield_active & YIELD_WRITE)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_init(void)
@@ -162,12 +203,25 @@ static inline void debug_yield_init(void)
 }
 #endif
 
+#ifdef DEBUG_FULL_MB
+static inline void read_barrier()
+{
+	smp_mb();
+}
+#else
+static inline void read_barrier()
+{
+	barrier();
+}
+#endif
+
 /*
- * Limiting the nesting level to 256 to keep instructions small in the read
- * fast-path.
+ * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
+ * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
  */
-#define RCU_GP_COUNT (1U << 0)
-#define RCU_GP_CTR_BIT (1U << 8)
+#define RCU_GP_COUNT (1UL << 0)
+/* Use the amount of bits equal to half of the architecture long size */
+#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
 
 /*
@@ -175,54 +229,52 @@ static inline void debug_yield_init(void)
  * Using a int rather than a char to eliminate false register dependencies
  * causing stalls on some architectures.
  */
-extern int urcu_gp_ctr;
+extern long urcu_gp_ctr;
 
-extern int __thread urcu_active_readers;
+extern long __thread urcu_active_readers;
 
-static inline int rcu_old_gp_ongoing(int *value)
+static inline int rcu_old_gp_ongoing(long *value)
 {
-	int v;
+	long v;
 
 	if (value == NULL)
 		return 0;
-	debug_yield_write();
+	/*
+	 * Make sure both tests below are done on the same version of *value
+	 * to insure consistency.
+	 */
 	v = ACCESS_ONCE(*value);
-	debug_yield_write();
 	return (v & RCU_GP_CTR_NEST_MASK) &&
-		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
+		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 }
 
 static inline void rcu_read_lock(void)
 {
-	int tmp;
+	long tmp;
 
-	debug_yield_read();
 	tmp = urcu_active_readers;
-	debug_yield_read();
+	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+	/* The data dependency "read urcu_gp_ctr, write urcu_active_readers",
+	 * serializes those two memory operations. */
 	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
-		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
+		urcu_active_readers = ACCESS_ONCE(urcu_gp_ctr);
 	else
 		urcu_active_readers = tmp + RCU_GP_COUNT;
-	debug_yield_read();
 	/*
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	barrier();
-	debug_yield_read();
+	read_barrier();
 }
 
 static inline void rcu_read_unlock(void)
 {
-	debug_yield_read();
-	barrier();
-	debug_yield_read();
+	read_barrier();
 	/*
 	 * Finish using rcu before decrementing the pointer.
 	 * See force_mb_all_threads().
 	 */
 	urcu_active_readers -= RCU_GP_COUNT;
-	debug_yield_read();
 }
 
 /**
@@ -263,7 +315,6 @@ extern void synchronize_rcu(void);
 #define urcu_publish_content(p, v) \
 ({ \
 	void *oldptr; \
-	debug_yield_write(); \
 	oldptr = rcu_xchg_pointer(p, v); \
 	synchronize_rcu(); \
 	oldptr; \
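
The key change above moves the grace-period phase bit from bit 8 (1U << 8) to the middle of a long, 1UL << (sizeof(long) << 2), so the nesting count gets the whole lower half of the word; this is also why urcu_gp_ctr and urcu_active_readers switch from int to long. The standalone sketch below copies the three counter macros from the + lines of this patch and re-runs the same two-part test as rcu_old_gp_ongoing() on hand-made values; old_gp_ongoing(), main() and the printed labels are illustrative only and are not part of urcu.h.

/*
 * gp_ctr_sketch.c - illustration of the counter layout added by this patch.
 * Build with: cc -Wall gp_ctr_sketch.c
 */
#include <stdio.h>

#define RCU_GP_COUNT (1UL << 0)
/* Use the amount of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)

/*
 * Same test as rcu_old_gp_ongoing(), minus the ACCESS_ONCE()/NULL plumbing:
 * the reader is inside a read-side critical section (non-zero nest count)
 * AND its snapshot belongs to the other grace-period phase.
 */
static int old_gp_ongoing(long reader_snapshot, long gp_ctr)
{
	return (reader_snapshot & RCU_GP_CTR_NEST_MASK) &&
		((reader_snapshot ^ gp_ctr) & RCU_GP_CTR_BIT);
}

int main(void)
{
	long gp_ctr = RCU_GP_COUNT;	/* global counter, phase 0 */
	long reader = gp_ctr;		/* outermost rcu_read_lock() copies it */

	/* On LP64 the phase bit is bit 32, the nest mask covers bits 0-31. */
	printf("RCU_GP_CTR_BIT       = 0x%lx\n", RCU_GP_CTR_BIT);
	printf("RCU_GP_CTR_NEST_MASK = 0x%lx\n", RCU_GP_CTR_NEST_MASK);

	/* Reader and writer still in the same phase: writer need not wait. */
	printf("same phase : %d\n", old_gp_ongoing(reader, gp_ctr));

	/* Writer flips the phase bit to start a new grace period. */
	gp_ctr ^= RCU_GP_CTR_BIT;
	printf("other phase: %d\n", old_gp_ongoing(reader, gp_ctr));

	/* Nested rcu_read_lock() only bumps the low-order nest count. */
	reader += RCU_GP_COUNT;
	printf("nested     : %d\n", old_gp_ongoing(reader, gp_ctr));

	/* A thread with a zero nest count never delays the grace period. */
	printf("idle reader: %d\n", old_gp_ongoing(0, gp_ctr));
	return 0;
}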