X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.h;h=92b31df603d157ffa942534af1d5731d912c2342;hp=c77b26f958e34b8521f1b8363d83b6492cb46b0f;hb=b715b99eb0d52e0744bfea46c62849f260a5ec58;hpb=9d335088c7c7eb6219e32ec9426d336f3a211a77

diff --git a/urcu.h b/urcu.h
index c77b26f..92b31df 100644
--- a/urcu.h
+++ b/urcu.h
@@ -17,20 +17,84 @@
  * Distributed under GPLv2
  */
 
+#include <stdlib.h>
+#include <pthread.h>
+
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
+#define likely(x)	__builtin_expect(!!(x), 1)
+#define unlikely(x)	__builtin_expect(!!(x), 0)
+
 /* x86 32/64 specific */
 #define mb()	asm volatile("mfence":::"memory")
 #define rmb()	asm volatile("lfence":::"memory")
 #define wmb()	asm volatile("sfence" ::: "memory")
 
+/* Assume SMP machine, given we don't have this information */
+#define CONFIG_SMP 1
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#endif
+
 static inline void atomic_inc(int *v)
 {
 	asm volatile("lock; incl %0"
 		     : "+m" (*v));
 }
 
+#define xchg(ptr, v) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
+
+struct __xchg_dummy {
+	unsigned long a[100];
+};
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ *	  but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+				   int size)
+{
+	switch (size) {
+	case 1:
+		asm volatile("xchgb %b0,%1"
+			     : "=q" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 2:
+		asm volatile("xchgw %w0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 4:
+		asm volatile("xchgl %k0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 8:
+		asm volatile("xchgq %0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	}
+	return x;
+}
+
 /* Nop everywhere except on alpha. */
 #define smp_read_barrier_depends()
 
@@ -68,10 +132,21 @@ static inline void atomic_inc(int *v)
 
 #ifdef DEBUG_YIELD
 #include <sched.h>
+#include <time.h>
+#include <pthread.h>
+#include <unistd.h>
 
 #define YIELD_READ	(1 << 0)
 #define YIELD_WRITE	(1 << 1)
 
+/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
 extern unsigned int yield_active;
 extern unsigned int __thread rand_yield;
 
@@ -79,14 +154,14 @@ static inline void debug_yield_read(void)
 {
 	if (yield_active & YIELD_READ)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_write(void)
 {
 	if (yield_active & YIELD_WRITE)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_init(void)
@@ -108,48 +183,130 @@ static inline void debug_yield_init(void)
 }
 #endif
 
-/* Global quiescent period parity */
-extern int urcu_qparity;
-
-extern int __thread urcu_active_readers[2];
-
-static inline int get_urcu_qparity(void)
+#ifdef DEBUG_FULL_MB
+static inline void read_barrier()
 {
-	return urcu_qparity;
+	smp_mb();
 }
+#else
+static inline void read_barrier()
+{
+	barrier();
+}
+#endif
 
 /*
- * urcu_parity should be declared on the caller's stack.
+ * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
+ * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
  */
-static inline void rcu_read_lock(int *urcu_parity)
+#define RCU_GP_COUNT		(1UL << 0)
+/* Use the amount of bits equal to half of the architecture long size */
+#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
+#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
+
+/*
+ * Global quiescent period counter with low-order bits unused.
+ * Using a int rather than a char to eliminate false register dependencies
+ * causing stalls on some architectures.
+ */
+extern long urcu_gp_ctr;
+
+extern long __thread urcu_active_readers;
+
+static inline int rcu_old_gp_ongoing(long *value)
+{
+	long v;
+
+	if (value == NULL)
+		return 0;
+	debug_yield_write();
+	/*
+	 * Make sure both tests below are done on the same version of *value
+	 * to insure consistency.
+	 */
+	v = ACCESS_ONCE(*value);
+	debug_yield_write();
+	return (v & RCU_GP_CTR_NEST_MASK) &&
+		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
+}
+
+static inline void rcu_read_lock(void)
 {
+	long tmp;
+
 	debug_yield_read();
-	*urcu_parity = get_urcu_qparity();
+	tmp = urcu_active_readers;
 	debug_yield_read();
-	urcu_active_readers[*urcu_parity]++;
+	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
+		urcu_active_readers = urcu_gp_ctr;
+	else
+		urcu_active_readers = tmp + RCU_GP_COUNT;
 	debug_yield_read();
 	/*
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	barrier();
+	read_barrier();
 	debug_yield_read();
 }
 
-static inline void rcu_read_unlock(int *urcu_parity)
+static inline void rcu_read_unlock(void)
 {
 	debug_yield_read();
-	barrier();
+	read_barrier();
 	debug_yield_read();
 	/*
 	 * Finish using rcu before decrementing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	urcu_active_readers[*urcu_parity]--;
+	urcu_active_readers -= RCU_GP_COUNT;
 	debug_yield_read();
 }
 
-extern void *urcu_publish_content(void **ptr, void *new);
+/**
+ * rcu_assign_pointer - assign (publicize) a pointer to a newly
+ * initialized structure that will be dereferenced by RCU read-side
+ * critical sections. Returns the value assigned.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (pretty much all of them other than x86), and also prevents
+ * the compiler from reordering the code that initializes the
+ * structure after the pointer assignment. More importantly, this
+ * call documents which pointers will be dereferenced by RCU read-side
+ * code.
+ */
+
+#define rcu_assign_pointer(p, v)	\
+	({				\
+		if (!__builtin_constant_p(v) || \
+		    ((v) != NULL))	\
+			wmb();		\
+		(p) = (v);		\
+	})
+
+#define rcu_xchg_pointer(p, v)		\
+	({				\
+		if (!__builtin_constant_p(v) || \
+		    ((v) != NULL))	\
+			wmb();		\
+		xchg(p, v);		\
+	})
+
+extern void synchronize_rcu(void);
+
+/*
+ * Exchanges the pointer and waits for quiescent state.
+ * The pointer returned can be freed.
+ */
+#define urcu_publish_content(p, v)	\
+	({				\
+		void *oldptr;		\
+		debug_yield_write();	\
+		oldptr = rcu_xchg_pointer(p, v); \
+		synchronize_rcu();	\
+		oldptr;			\
+	})
 
 /*
  * Reader thread registration.