X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.h;h=01a4c6857d7e6fd31668dec57615be03e94ccc91;hp=d417f34b52ddd1ce1509835dd0b7f062436d96f2;hb=1430ee0bdca4cb454d534ef7fc84af3e0692f26b;hpb=27b012e271a82b9a0d94543688904f207cd154ea

diff --git a/urcu.h b/urcu.h
index d417f34..01a4c68 100644
--- a/urcu.h
+++ b/urcu.h
@@ -1,6 +1,24 @@
 #ifndef _URCU_H
 #define _URCU_H
 
+/*
+ * urcu.h
+ *
+ * Userspace RCU header
+ *
+ * Copyright February 2009 - Mathieu Desnoyers
+ *
+ * Credits to Paul E. McKenney
+ * for inspiration coming from the Linux kernel RCU and rcu-preempt.
+ *
+ * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
+ * and rcu_dereference primitives come from the Linux kernel.
+ *
+ * Distributed under GPLv2
+ */
+
+#include <stdlib.h>
+
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
@@ -9,53 +27,146 @@
 #define rmb() asm volatile("lfence":::"memory")
 #define wmb() asm volatile("sfence" ::: "memory")
 
-
-
-/* x86 32 */
 static inline void atomic_inc(int *v)
 {
 	asm volatile("lock; incl %0"
-		     : "+m" (v->counter));
+		     : "+m" (*v));
 }
 
 /* Nop everywhere except on alpha. */
 #define smp_read_barrier_depends()
 
+/*
+ * Prevent the compiler from merging or refetching accesses. The compiler
+ * is also forbidden from reordering successive instances of ACCESS_ONCE(),
+ * but only when the compiler is aware of some particular ordering. One way
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+ * This macro does absolutely -nothing- to prevent the CPU from reordering,
+ * merging, or refetching absolutely anything at any time. Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
+ */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
+/**
+ * rcu_dereference - fetch an RCU-protected pointer in an
+ * RCU read-side critical section. This pointer may later
+ * be safely dereferenced.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (currently only the Alpha), and, more importantly, documents
+ * exactly which pointers are protected by RCU.
+ */
+
+#define rcu_dereference(p)	({ \
+				typeof(p) _________p1 = ACCESS_ONCE(p); \
+				smp_read_barrier_depends(); \
+				(_________p1); \
+				})
+
 #define SIGURCU SIGUSR1
 
-/* Global quiescent period parity */
-extern int urcu_qparity;
+#ifdef DEBUG_YIELD
+#include <sched.h>
+
+#define YIELD_READ (1 << 0)
+#define YIELD_WRITE (1 << 1)
 
-extern int __thread urcu_active_readers[2];
+extern unsigned int yield_active;
+extern unsigned int __thread rand_yield;
 
-static inline int get_urcu_qparity(void)
+static inline void debug_yield_read(void)
 {
-	return urcu_qparity;
+	if (yield_active & YIELD_READ)
+		if (rand_r(&rand_yield) & 0x1)
+			sched_yield();
 }
 
+static inline void debug_yield_write(void)
+{
+	if (yield_active & YIELD_WRITE)
+		if (rand_r(&rand_yield) & 0x1)
+			sched_yield();
+}
+
+static inline void debug_yield_init(void)
+{
+	rand_yield = time(NULL) ^ pthread_self();
+}
+#else
+static inline void debug_yield_read(void)
+{
+}
+
+static inline void debug_yield_write(void)
+{
+}
+
+static inline void debug_yield_init(void)
+{
+
+}
+#endif
+
 /*
- * returns urcu_parity.
+ * Limiting the nesting level to 256 to keep instructions small in the read
+ * fast-path.
*/ -static inline int rcu_read_lock(void) +#define RCU_GP_COUNT (1U << 0) +#define RCU_GP_CTR_BIT (1U << 8) +#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1) + +/* Global quiescent period counter with low-order bits unused. */ +extern int urcu_gp_ctr; + +extern int __thread urcu_active_readers; + +static inline int rcu_old_gp_ongoing(int *value) { - int urcu_parity = get_urcu_qparity(); - urcu_active_readers[urcu_parity]++; + int v; + + if (value == NULL) + return 0; + debug_yield_write(); + v = ACCESS_ONCE(*value); + debug_yield_write(); + return (v & RCU_GP_CTR_NEST_MASK) && + ((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT); +} + +static inline void rcu_read_lock(void) +{ + int tmp; + + debug_yield_read(); + tmp = urcu_active_readers; + debug_yield_read(); + if (!(tmp & RCU_GP_CTR_NEST_MASK)) + urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT; + else + urcu_active_readers = tmp + RCU_GP_COUNT; + debug_yield_read(); /* * Increment active readers count before accessing the pointer. * See force_mb_all_threads(). */ barrier(); - return urcu_parity; + debug_yield_read(); } -static inline void rcu_read_unlock(int urcu_parity) +static inline void rcu_read_unlock(void) { + debug_yield_read(); barrier(); + debug_yield_read(); /* * Finish using rcu before decrementing the pointer. * See force_mb_all_threads(). */ - urcu_active_readers[urcu_parity]--; + urcu_active_readers -= RCU_GP_COUNT; + debug_yield_read(); } extern void *urcu_publish_content(void **ptr, void *new); @@ -64,6 +175,6 @@ extern void *urcu_publish_content(void **ptr, void *new); * Reader thread registration. */ extern void urcu_register_thread(void); -extern void urcu_register_thread(void); +extern void urcu_unregister_thread(void); #endif /* _URCU_H */
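
The patch above replaces the two-counter "parity" read side with a single per-thread counter that encodes a nesting count and a grace-period phase bit. A minimal usage sketch of the resulting API follows; it is not part of the patch, the struct and function names are illustrative, and urcu_publish_content() is assumed to return the previously published pointer once it is safe to reclaim, as its signature suggests.

#include <stdlib.h>
#include "urcu.h"

struct mydata {
	int value;
};

static struct mydata *shared;		/* pointer handed to readers via RCU */

static void *reader_thread(void *arg)
{
	struct mydata *p;

	(void)arg;
	urcu_register_thread();		/* every reader registers once */

	rcu_read_lock();		/* read-side critical sections may nest */
	p = rcu_dereference(shared);	/* fetch the RCU-protected pointer */
	if (p)
		(void)p->value;		/* safe to use p until rcu_read_unlock() */
	rcu_read_unlock();

	urcu_unregister_thread();
	return NULL;
}

static void writer_update(int value)
{
	struct mydata *new = malloc(sizeof(*new));
	struct mydata *old;

	if (!new)
		return;
	new->value = value;
	/* Publish the new version; reclaim the old one once readers are done. */
	old = urcu_publish_content((void **)&shared, new);
	free(old);
}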
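
The counter encoding itself can be checked in isolation. The sketch below, again not part of the patch, replays the test made by rcu_old_gp_ongoing() on hand-built values: the low-order RCU_GP_CTR_NEST_MASK bits hold the nesting count, and RCU_GP_CTR_BIT is assumed to be the phase bit a writer flips in urcu_gp_ctr between grace periods.

#include <assert.h>
#include "urcu.h"

static void counter_encoding_example(void)
{
	int gp = RCU_GP_CTR_BIT;		/* global counter after one phase flip */
	int idle = 0;				/* reader outside any critical section */
	int old_reader = 0 + RCU_GP_COUNT;	/* entered before the flip (old phase) */
	int new_reader = gp + RCU_GP_COUNT;	/* entered after the flip (new phase) */

	/* Zero nesting count: never holds up a grace period. */
	assert(!((idle & RCU_GP_CTR_NEST_MASK) &&
		 ((idle ^ gp) & RCU_GP_CTR_BIT)));
	/* Old-phase reader still nested: the writer must keep waiting. */
	assert((old_reader & RCU_GP_CTR_NEST_MASK) &&
	       ((old_reader ^ gp) & RCU_GP_CTR_BIT));
	/* New-phase reader: cannot extend the previous grace period. */
	assert(!((new_reader & RCU_GP_CTR_NEST_MASK) &&
		 ((new_reader ^ gp) & RCU_GP_CTR_BIT)));
}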