X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.h;h=0ff0877ac4ac45d6966d674d100b3b47fb7ecbbd;hp=bee7715270794c3e83cede4acb1741a98d361ac6;hb=3db2d75b432e617014976239f694b91de2bc0d7d;hpb=41718ff94c4a07cb5f56d68084267798e471d1b1

diff --git a/urcu.h b/urcu.h
index bee7715..0ff0877 100644
--- a/urcu.h
+++ b/urcu.h
@@ -1,22 +1,52 @@
 #ifndef _URCU_H
 #define _URCU_H
 
+/*
+ * urcu.h
+ *
+ * Userspace RCU header
+ *
+ * Copyright February 2009 - Mathieu Desnoyers
+ *
+ * Credits for Paul E. McKenney
+ * for inspiration coming from the Linux kernel RCU and rcu-preempt.
+ *
+ * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
+ * and rcu_dereference primitives come from the Linux kernel.
+ *
+ * Distributed under GPLv2
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
-/* x86 32/64 specific */
-#define mb()    asm volatile("mfence":::"memory")
-#define rmb()   asm volatile("lfence":::"memory")
-#define wmb()   asm volatile("sfence" ::: "memory")
+#define likely(x)       __builtin_expect(!!(x), 1)
+#define unlikely(x)     __builtin_expect(!!(x), 0)
 
+/* Assume SMP machine, given we don't have this information */
+#define CONFIG_SMP 1
 
-/* x86 32 */
-static inline void atomic_inc(int *v)
-{
-        asm volatile("lock; incl %0"
-                     : "+m" (*v));
-}
+#ifdef CONFIG_SMP
+#define smp_mb()        mb()
+#define smp_rmb()       rmb()
+#define smp_wmb()       wmb()
+#define smp_mc()        mc()
+#define smp_rmc()       rmc()
+#define smp_wmc()       wmc()
+#else
+#define smp_mb()        barrier()
+#define smp_rmb()       barrier()
+#define smp_wmb()       barrier()
+#define smp_mc()        barrier()
+#define smp_rmc()       barrier()
+#define smp_wmc()       barrier()
+#endif
+
+#include "arch.h"
 
 /* Nop everywhere except on alpha. */
 #define smp_read_barrier_depends()
@@ -35,6 +65,39 @@ static inline void atomic_inc(int *v)
  */
 #define ACCESS_ONCE(x)  (*(volatile typeof(x) *)&(x))
 
+/*
+ * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
+ */
+#define _LOAD_SHARED(p)        ACCESS_ONCE(p)
+
+/*
+ * Load data from shared memory, doing a cache flush if required.
+ */
+#define LOAD_SHARED(p) \
+        ({ \
+                smp_rmc(); \
+                _LOAD_SHARED(p); \
+        })
+
+
+/*
+ * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
+ */
+#define _STORE_SHARED(x, v) \
+        do { \
+                (x) = (v); \
+        } while (0)
+
+/*
+ * Store v into x, where x is located in shared memory. Performs the required
+ * cache flush after writing.
+ */
+#define STORE_SHARED(x, v) \
+        do { \
+                _STORE_SHARED(x, v); \
+                smp_wmc(); \
+        } while (0)
+
 /**
  * rcu_dereference - fetch an RCU-protected pointer in an
  * RCU read-side critical section.  This pointer may later
@@ -46,57 +109,199 @@ static inline void atomic_inc(int *v)
  */
 
 #define rcu_dereference(p)     ({ \
-                typeof(p) _________p1 = ACCESS_ONCE(p); \
+                typeof(p) _________p1 = LOAD_SHARED(p); \
                 smp_read_barrier_depends(); \
                 (_________p1); \
                 })
 
 #define SIGURCU SIGUSR1
 
-/* Global quiescent period parity */
-extern int urcu_qparity;
+/*
+ * If a reader is really non-cooperative and refuses to commit its
+ * urcu_active_readers count to memory (there is no barrier in the reader
+ * per se), kick it after a few loops waiting for it.
+ */
+#define KICK_READER_LOOPS 10000
+
+#ifdef DEBUG_YIELD
+#include <sched.h>
+#include <time.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#define YIELD_READ      (1 << 0)
+#define YIELD_WRITE     (1 << 1)
+
+/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay. */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
 
-extern int __thread urcu_active_readers[2];
+extern unsigned int yield_active;
+extern unsigned int __thread rand_yield;
 
-static inline int get_urcu_qparity(void)
+static inline void debug_yield_read(void)
 {
-        return urcu_qparity;
+        if (yield_active & YIELD_READ)
+                if (rand_r(&rand_yield) & 0x1)
+                        usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
+static inline void debug_yield_write(void)
+{
+        if (yield_active & YIELD_WRITE)
+                if (rand_r(&rand_yield) & 0x1)
+                        usleep(rand_r(&rand_yield) % MAX_SLEEP);
+}
+
+static inline void debug_yield_init(void)
+{
+        rand_yield = time(NULL) ^ pthread_self();
+}
+#else
+static inline void debug_yield_read(void)
+{
+}
+
+static inline void debug_yield_write(void)
+{
+}
+
+static inline void debug_yield_init(void)
+{
+
+}
+#endif
+
+#ifdef DEBUG_FULL_MB
+static inline void reader_barrier()
+{
+        smp_mb();
+}
+#else
+static inline void reader_barrier()
+{
+        barrier();
+}
+#endif
+
+/*
+ * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
+ * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
+ */
+#define RCU_GP_COUNT            (1UL << 0)
+/* Use the amount of bits equal to half of the architecture long size */
+#define RCU_GP_CTR_BIT          (1UL << (sizeof(long) << 2))
+#define RCU_GP_CTR_NEST_MASK    (RCU_GP_CTR_BIT - 1)
+
 /*
- * returns urcu_parity.
+ * Global quiescent period counter with low-order bits unused.
+ * Using an int rather than a char to eliminate false register dependencies
+ * causing stalls on some architectures.
  */
-static inline int rcu_read_lock(void)
+extern long urcu_gp_ctr;
+
+extern long __thread urcu_active_readers;
+
+static inline int rcu_old_gp_ongoing(long *value)
+{
+        long v;
+
+        if (value == NULL)
+                return 0;
+        /*
+         * Make sure both tests below are done on the same version of *value
+         * to ensure consistency.
+         */
+        v = LOAD_SHARED(*value);
+        return (v & RCU_GP_CTR_NEST_MASK) &&
+                ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
+}
+
+static inline void rcu_read_lock(void)
 {
-        int urcu_parity = get_urcu_qparity();
-        urcu_active_readers[urcu_parity]++;
+        long tmp;
+
+        tmp = urcu_active_readers;
+        /* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+        /*
+         * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
+         * serializes those two memory operations. The memory barrier in the
+         * signal handler ensures we receive the proper memory commit barriers
+         * required by _STORE_SHARED and _LOAD_SHARED whenever communication
+         * with the writer is needed.
+         */
+        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
+                _STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
+        else
+                _STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
         /*
          * Increment active readers count before accessing the pointer.
          * See force_mb_all_threads().
          */
-        barrier();
-        return urcu_parity;
+        reader_barrier();
 }
 
-static inline void rcu_read_unlock(int urcu_parity)
+static inline void rcu_read_unlock(void)
 {
-        barrier();
+        reader_barrier();
         /*
          * Finish using rcu before decrementing the pointer.
          * See force_mb_all_threads().
          */
-        urcu_active_readers[urcu_parity]--;
+        _STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
 }
 
-extern void rcu_write_lock(void);
-extern void rcu_write_unlock(void);
+/**
+ * rcu_assign_pointer - assign (publicize) a pointer to a newly
+ * initialized structure that will be dereferenced by RCU read-side
+ * critical sections.  Returns the value assigned.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (pretty much all of them other than x86), and also prevents
+ * the compiler from reordering the code that initializes the
+ * structure after the pointer assignment.  More importantly, this
+ * call documents which pointers will be dereferenced by RCU read-side
+ * code.
+ */
+
+#define rcu_assign_pointer(p, v) \
+        ({ \
+                if (!__builtin_constant_p(v) || \
+                    ((v) != NULL)) \
+                        wmb(); \
+                STORE_SHARED(p, v); \
+        })
+
+#define rcu_xchg_pointer(p, v) \
+        ({ \
+                if (!__builtin_constant_p(v) || \
+                    ((v) != NULL)) \
+                        wmb(); \
+                xchg(p, v); \
+        })
 
-extern void *urcu_publish_content(void **ptr, void *new);
+extern void synchronize_rcu(void);
+
+/*
+ * Exchanges the pointer and waits for a quiescent state.
+ * The pointer returned can be freed.
+ */
+#define urcu_publish_content(p, v) \
+        ({ \
+                void *oldptr; \
+                oldptr = rcu_xchg_pointer(p, v); \
+                synchronize_rcu(); \
+                oldptr; \
+        })
 
 /*
  * Reader thread registration.
  */
 extern void urcu_register_thread(void);
-extern void urcu_register_thread(void);
+extern void urcu_unregister_thread(void);
 
 #endif /* _URCU_H */
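A minimal usage sketch of the read and update sides exposed by the revised header follows (it is not part of the patch above). It assumes a build linked against urcu.c from the same tree; the type struct mydata, the variables rcu_data and writer_lock, and the helpers read_value()/update_value() are illustrative names only, not part of liburcu, and writers are assumed to serialize among themselves with their own mutex.

/*
 * Usage sketch only: struct mydata, rcu_data, writer_lock, read_value()
 * and update_value() are hypothetical names, not liburcu symbols.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include "urcu.h"

struct mydata {
        int value;
};

static struct mydata *rcu_data;         /* RCU-protected pointer */
static pthread_mutex_t writer_lock = PTHREAD_MUTEX_INITIALIZER;

/* Reader side: the calling thread must have called urcu_register_thread(). */
static int read_value(void)
{
        struct mydata *p;
        int v = -1;

        rcu_read_lock();
        p = rcu_dereference(rcu_data);  /* paired with the publish below */
        if (p)
                v = p->value;
        rcu_read_unlock();
        return v;
}

/* Update side: publish a new version, then reclaim the old one. */
static void update_value(int value)
{
        struct mydata *newp, *oldp;

        newp = malloc(sizeof(*newp));
        if (!newp)
                return;
        newp->value = value;

        pthread_mutex_lock(&writer_lock);
        /* Exchange the pointer and wait for a grace period. */
        oldp = urcu_publish_content((void **)&rcu_data, newp);
        pthread_mutex_unlock(&writer_lock);

        free(oldp);                     /* no reader can still reference it */
}

int main(void)
{
        urcu_register_thread();         /* this thread acts as a reader */
        update_value(42);
        printf("read %d\n", read_value());
        urcu_unregister_thread();
        return 0;
}

The split keeps the read side lock-free: rcu_read_lock()/rcu_read_unlock() only manipulate the per-thread urcu_active_readers counter, while urcu_publish_content() makes the updater pay for the grace period through synchronize_rcu() before the old copy is freed.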