X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.h;h=c4a7992b4b7b166d527411d4ae5108cac174fc95;hp=fb8cedf3e15beab5ba5355b153d0bb999fa1c7fc;hb=5464000f823e493703feb49d7580a479d3ba54d4;hpb=f4a486ac095bd844d0c18995737d589b8f085b69

diff --git a/urcu.h b/urcu.h
index fb8cedf..c4a7992 100644
--- a/urcu.h
+++ b/urcu.h
@@ -18,10 +18,14 @@
  */
 
 #include
+#include
 
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
+#define likely(x)	__builtin_expect(!!(x), 1)
+#define unlikely(x)	__builtin_expect(!!(x), 0)
+
 /* x86 32/64 specific */
 #define mb()	asm volatile("mfence":::"memory")
 #define rmb()	asm volatile("lfence":::"memory")
@@ -63,7 +67,13 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 			     : "memory");
 		break;
 	case 4:
-		asm volatile("xchgl %0,%1"
+		asm volatile("xchgl %k0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 8:
+		asm volatile("xchgq %0,%1"
 			     : "=r" (x)
 			     : "m" (*__xg(ptr)), "0" (x)
 			     : "memory");
@@ -109,10 +119,21 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 
 #ifdef DEBUG_YIELD
 #include
+#include
+#include
+#include
 
 #define YIELD_READ	(1 << 0)
 #define YIELD_WRITE	(1 << 1)
 
+/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
 extern unsigned int yield_active;
 extern unsigned int __thread rand_yield;
 
@@ -120,14 +141,14 @@ static inline void debug_yield_read(void)
 {
 	if (yield_active & YIELD_READ)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_write(void)
 {
 	if (yield_active & YIELD_WRITE)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_init(void)
@@ -149,41 +170,63 @@ static inline void debug_yield_init(void)
 }
 #endif
 
+#ifdef DEBUG_FULL_MB
+static inline void read_barrier()
+{
+	mb();
+}
+#else
+static inline void read_barrier()
+{
+	barrier();
+}
+#endif
+
 /*
- * Limiting the nesting level to 256 to keep instructions small in the read
- * fast-path.
+ * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use
+ * a full 8-bit, 16-bit or 32-bit bitmask for the lower-order bits.
  */
-#define RCU_GP_COUNT	(1U << 0)
-#define RCU_GP_CTR_BIT	(1U << 8)
+#define RCU_GP_COUNT	(1UL << 0)
+/* Use a number of bits equal to half the architecture's long size */
+#define RCU_GP_CTR_BIT	(1UL << (sizeof(long) << 2))
 #define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
 
-/* Global quiescent period counter with low-order bits unused. */
-extern int urcu_gp_ctr;
+/*
+ * Global quiescent period counter with low-order bits unused.
+ * Using a full-word integer rather than a char to eliminate false register
+ * dependencies causing stalls on some architectures.
+ */
+extern long urcu_gp_ctr;
 
-extern int __thread urcu_active_readers;
+extern long __thread urcu_active_readers;
 
-static inline int rcu_old_gp_ongoing(int *value)
+static inline int rcu_old_gp_ongoing(long *value)
 {
-	int v;
+	long v;
 
 	if (value == NULL)
 		return 0;
 	debug_yield_write();
+	/*
+	 * Make sure both tests below are done on the same version of *value
+	 * to ensure consistency.
+	 */
 	v = ACCESS_ONCE(*value);
 	debug_yield_write();
 	return (v & RCU_GP_CTR_NEST_MASK) &&
-		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
+		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 }
 
 static inline void rcu_read_lock(void)
 {
-	int tmp;
+	long tmp;
 
 	debug_yield_read();
 	tmp = urcu_active_readers;
 	debug_yield_read();
-	if (!(tmp & RCU_GP_CTR_NEST_MASK))
-		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
+	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
+		urcu_active_readers = urcu_gp_ctr;
 	else
 		urcu_active_readers = tmp + RCU_GP_COUNT;
 	debug_yield_read();
@@ -191,14 +234,14 @@ static inline void rcu_read_lock(void)
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	barrier();
+	read_barrier();
 	debug_yield_read();
 }
 
 static inline void rcu_read_unlock(void)
 {
 	debug_yield_read();
-	barrier();
+	read_barrier();
 	debug_yield_read();
 	/*
 	 * Finish using rcu before decrementing the pointer.
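
Note on the new counter layout: the patch replaces the fixed 256-level nesting limit with a split of a long into a low-order nesting half and a high-order phase bit. The following is a minimal standalone sketch of the resulting arithmetic, assuming an LP64 target; the local gp_ctr/readers variables are illustrative stand-ins for urcu_gp_ctr and urcu_active_readers, not part of urcu.h.

#include <assert.h>
#include <stdio.h>

#define RCU_GP_COUNT		(1UL << 0)
/* Bit 32 on LP64 (sizeof(long) == 8), bit 16 on ILP32. */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

int main(void)
{
	/* Per the comment in rcu_read_lock(), the global counter always
	 * carries a nesting count of 1 and the writer flips the phase
	 * bit: urcu_gp_ctr = RCU_GP_COUNT | (0 or RCU_GP_CTR_BIT). */
	long gp_ctr = RCU_GP_COUNT | RCU_GP_CTR_BIT;
	long readers;

	readers = gp_ctr;		/* outermost rcu_read_lock(): copy */
	assert((readers & RCU_GP_CTR_NEST_MASK) == 1);

	readers += RCU_GP_COUNT;	/* nested rcu_read_lock(): count only */
	assert((readers & RCU_GP_CTR_NEST_MASK) == 2);

	/* rcu_old_gp_ongoing() logic: a reader holds up the writer only
	 * while it is nested AND its snapshot phase differs from the
	 * current global phase. Same phase here, so not ongoing. */
	assert(((readers & RCU_GP_CTR_NEST_MASK) &&
		((readers ^ gp_ctr) & RCU_GP_CTR_BIT)) == 0);

	gp_ctr ^= RCU_GP_CTR_BIT;	/* writer flips the phase */
	assert((readers & RCU_GP_CTR_NEST_MASK) &&
	       ((readers ^ gp_ctr) & RCU_GP_CTR_BIT));

	printf("nest mask: 0x%lx\n", RCU_GP_CTR_NEST_MASK);
	return 0;
}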
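
On the read side, the inlines changed above are what a reader wraps around its pointer accesses. A hedged usage sketch follows; struct mystruct, shared_ptr, and the updater that publishes it are hypothetical, and any per-thread registration the library requires is omitted. Only rcu_read_lock(), rcu_read_unlock(), and ACCESS_ONCE() come from urcu.h.

#include <stdio.h>
#include "urcu.h"

struct mystruct {
	int a;
};

/* Hypothetical pointer published by an updater thread. */
static struct mystruct *shared_ptr;

static void reader_side(void)
{
	struct mystruct *p;

	rcu_read_lock();		/* mark this thread as an active reader */
	p = ACCESS_ONCE(shared_ptr);	/* single load of the shared pointer */
	if (p)
		printf("a = %d\n", p->a);
	rcu_read_unlock();		/* after this, a grace period may let
					 * the updater free p */
}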
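
The new 8-byte case in __xchg() is what makes atomic pointer exchange work on x86-64, where sizeof(void *) == 8 selects the xchgq arm. A sketch of an update side built directly on it; publish() and shared_ptr are hypothetical, the library may provide a higher-level xchg() wrapper instead of direct __xchg() calls, and the returned old pointer must still wait out a grace period before it can be freed.

#include "urcu.h"

struct mystruct {
	int a;
};

static struct mystruct *shared_ptr;

/* Atomically swap in a new version and return the previous one.
 * On x86-64, sizeof(shared_ptr) == 8 selects the new xchgq case;
 * on 32-bit x86, sizeof(shared_ptr) == 4 still selects xchgl. */
static struct mystruct *publish(struct mystruct *new_p)
{
	return (struct mystruct *)
		__xchg((unsigned long)new_p, &shared_ptr,
		       sizeof(shared_ptr));
}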