X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=urcu.h;h=b8f6d9b29ab20a73b40b547261294c0f033c56e8;hb=4917a879125c1fca08ccb4328104701e83108556;hp=27695d495aeeb6388b121a099183949cbbdbac06;hpb=e462817ecdce42b00f0cb0825e2425b08943105b;p=urcu.git

diff --git a/urcu.h b/urcu.h
index 27695d4..b8f6d9b 100644
--- a/urcu.h
+++ b/urcu.h
@@ -18,10 +18,14 @@
  */
 
 #include 
+#include 
 
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
 /* x86 32/64 specific */
 #define mb() asm volatile("mfence":::"memory")
 #define rmb() asm volatile("lfence":::"memory")
@@ -33,6 +37,51 @@ static inline void atomic_inc(int *v)
                      : "+m" (*v));
 }
 
+#define xchg(ptr, v) \
+        ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
+
+struct __xchg_dummy {
+        unsigned long a[100];
+};
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Note 2: xchg has a side effect, so the volatile attribute is necessary,
+ * but generally the primitive is invalid; *ptr is an output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+                                   int size)
+{
+        switch (size) {
+        case 1:
+                asm volatile("xchgb %b0,%1"
+                             : "=q" (x)
+                             : "m" (*__xg(ptr)), "0" (x)
+                             : "memory");
+                break;
+        case 2:
+                asm volatile("xchgw %w0,%1"
+                             : "=r" (x)
+                             : "m" (*__xg(ptr)), "0" (x)
+                             : "memory");
+                break;
+        case 4:
+                asm volatile("xchgl %k0,%1"
+                             : "=r" (x)
+                             : "m" (*__xg(ptr)), "0" (x)
+                             : "memory");
+                break;
+        case 8:
+                asm volatile("xchgq %0,%1"
+                             : "=r" (x)
+                             : "m" (*__xg(ptr)), "0" (x)
+                             : "memory");
+                break;
+        }
+        return x;
+}
+
 /* Nop everywhere except on alpha. */
 #define smp_read_barrier_depends()
@@ -70,6 +119,8 @@ static inline void atomic_inc(int *v)
 
 #ifdef DEBUG_YIELD
 #include 
+#include 
+#include 
 
 #define YIELD_READ (1 << 0)
 #define YIELD_WRITE (1 << 1)
@@ -111,14 +162,19 @@ static inline void debug_yield_init(void)
 #endif
 
 /*
- * Limiting the nesting level to 256 to keep instructions small in the read
- * fast-path.
+ * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can
+ * use a full 8-bit, 16-bit or 32-bit bitmask for the lower-order bits.
  */
 #define RCU_GP_COUNT (1U << 0)
-#define RCU_GP_CTR_BIT (1U << 8)
+/* Use a number of bits equal to half the architecture's long size */
+#define RCU_GP_CTR_BIT (sizeof(long) << 2)
 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
 
-/* Global quiescent period counter with low-order bits unused. */
+/*
+ * Global quiescent period counter with low-order bits unused.
+ * Using an int rather than a char to eliminate false register dependencies
+ * causing stalls on some architectures.
+ */
 extern int urcu_gp_ctr;
 
 extern int __thread urcu_active_readers;
@@ -143,7 +199,7 @@ static inline void rcu_read_lock(void)
         debug_yield_read();
         tmp = urcu_active_readers;
         debug_yield_read();
-        if (!(tmp & RCU_GP_CTR_NEST_MASK))
+        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
                 urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
         else
                 urcu_active_readers = tmp + RCU_GP_COUNT;
@@ -190,9 +246,29 @@ static inline void rcu_read_unlock(void)
                 (p) = (v); \
         })
 
-extern void *urcu_publish_content(void **ptr, void *new);
+#define rcu_xchg_pointer(p, v) \
+        ({ \
+                if (!__builtin_constant_p(v) || \
+                    ((v) != NULL)) \
+                        wmb(); \
+                xchg(p, v); \
+        })
+
 extern void synchronize_rcu(void);
 
+/*
+ * Exchanges the pointer and waits for a quiescent state.
+ * The pointer returned can be freed.
+ */
+#define urcu_publish_content(p, v) \
+        ({ \
+                void *oldptr; \
+                debug_yield_write(); \
+                oldptr = rcu_xchg_pointer(p, v); \
+                synchronize_rcu(); \
+                oldptr; \
+        })
+
 /*
  * Reader thread registration.
  */
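
Usage note (editorial, not part of the patch above): after this change, urcu_publish_content() is a statement-expression macro that issues wmb() through rcu_xchg_pointer(), atomically exchanges the shared pointer, then blocks in synchronize_rcu() until all pre-existing readers have finished, so the old pointer it returns can be freed immediately. The sketch below shows one way a writer and a reader might use the patched urcu.h; struct node, writer_update() and reader_get() are hypothetical names, and rcu_dereference() is assumed to be provided elsewhere in urcu.h.

/*
 * Illustrative sketch only -- not part of the patch.  Builds on the patched
 * urcu.h above.  struct node, writer_update() and reader_get() are made-up
 * names; rcu_dereference() is assumed to be defined elsewhere in urcu.h.
 */
#include <stdlib.h>
#include "urcu.h"

struct node {
        int value;                      /* hypothetical payload */
};

static struct node *shared;             /* pointer published to readers */

/*
 * Writer side: build the new version, publish it, free the old one.
 * urcu_publish_content() issues wmb() (via rcu_xchg_pointer()), atomically
 * exchanges the pointer, then waits in synchronize_rcu() until every
 * pre-existing reader has left its critical section, so freeing the old
 * node right away is safe.
 */
static void writer_update(int value)
{
        struct node *new_node = malloc(sizeof(*new_node));
        struct node *old_node;

        new_node->value = value;
        old_node = urcu_publish_content((void **)&shared, new_node);
        free(old_node);
}

/*
 * Reader side: accesses must sit between rcu_read_lock() and
 * rcu_read_unlock() so that the writer's grace period waits for them.
 */
static int reader_get(void)
{
        struct node *p;
        int v = -1;

        rcu_read_lock();
        p = rcu_dereference(shared);
        if (p)
                v = p->value;
        rcu_read_unlock();
        return v;
}

Each reader thread still has to register itself through the reader thread registration interface declared below before entering its first read-side critical section; otherwise synchronize_rcu() has no way to wait for it.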