X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=urcu.h;h=27695d495aeeb6388b121a099183949cbbdbac06;hb=e462817ecdce42b00f0cb0825e2425b08943105b;hp=b6b5c7b039e359225d947afb91a29a2f25ba6a3c;hpb=c265818b951224bcde407b1efa14f9daf44949b3;p=urcu.git

diff --git a/urcu.h b/urcu.h
index b6b5c7b..27695d4 100644
--- a/urcu.h
+++ b/urcu.h
@@ -17,6 +17,8 @@
  * Distributed under GPLv2
  */
 
+#include <stdlib.h>
+
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
@@ -66,41 +68,130 @@ static inline void atomic_inc(int *v)
 
 #define SIGURCU SIGUSR1
 
-/* Global quiescent period parity */
-extern int urcu_qparity;
+#ifdef DEBUG_YIELD
+#include <sched.h>
+
+#define YIELD_READ (1 << 0)
+#define YIELD_WRITE (1 << 1)
+
+extern unsigned int yield_active;
+extern unsigned int __thread rand_yield;
+
+static inline void debug_yield_read(void)
+{
+        if (yield_active & YIELD_READ)
+                if (rand_r(&rand_yield) & 0x1)
+                        sched_yield();
+}
 
-extern int __thread urcu_active_readers[2];
+static inline void debug_yield_write(void)
+{
+        if (yield_active & YIELD_WRITE)
+                if (rand_r(&rand_yield) & 0x1)
+                        sched_yield();
+}
 
-static inline int get_urcu_qparity(void)
+static inline void debug_yield_init(void)
 {
-        return urcu_qparity;
+        rand_yield = time(NULL) ^ pthread_self();
 }
+#else
+static inline void debug_yield_read(void)
+{
+}
+
+static inline void debug_yield_write(void)
+{
+}
+
+static inline void debug_yield_init(void)
+{
+
+}
+#endif
 
 /*
- * urcu_parity should be declared on the caller's stack.
+ * Limiting the nesting level to 256 to keep instructions small in the read
+ * fast-path.
  */
-static inline void rcu_read_lock(int *urcu_parity)
+#define RCU_GP_COUNT (1U << 0)
+#define RCU_GP_CTR_BIT (1U << 8)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+/* Global quiescent period counter with low-order bits unused. */
+extern int urcu_gp_ctr;
+
+extern int __thread urcu_active_readers;
+
+static inline int rcu_old_gp_ongoing(int *value)
+{
+        int v;
+
+        if (value == NULL)
+                return 0;
+        debug_yield_write();
+        v = ACCESS_ONCE(*value);
+        debug_yield_write();
+        return (v & RCU_GP_CTR_NEST_MASK) &&
+                ((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
+}
+
+static inline void rcu_read_lock(void)
 {
-        *urcu_parity = get_urcu_qparity();
-        urcu_active_readers[*urcu_parity]++;
+        int tmp;
+
+        debug_yield_read();
+        tmp = urcu_active_readers;
+        debug_yield_read();
+        if (!(tmp & RCU_GP_CTR_NEST_MASK))
+                urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
+        else
+                urcu_active_readers = tmp + RCU_GP_COUNT;
+        debug_yield_read();
         /*
          * Increment active readers count before accessing the pointer.
          * See force_mb_all_threads().
          */
         barrier();
+        debug_yield_read();
 }
 
-static inline void rcu_read_unlock(int *urcu_parity)
+static inline void rcu_read_unlock(void)
 {
+        debug_yield_read();
         barrier();
+        debug_yield_read();
         /*
          * Finish using rcu before decrementing the pointer.
          * See force_mb_all_threads().
          */
-        urcu_active_readers[*urcu_parity]--;
+        urcu_active_readers -= RCU_GP_COUNT;
+        debug_yield_read();
 }
 
+/**
+ * rcu_assign_pointer - assign (publicize) a pointer to a newly
+ * initialized structure that will be dereferenced by RCU read-side
+ * critical sections.  Returns the value assigned.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (pretty much all of them other than x86), and also prevents
+ * the compiler from reordering the code that initializes the
+ * structure after the pointer assignment.  More importantly, this
+ * call documents which pointers will be dereferenced by RCU read-side
+ * code.
+ */
+
+#define rcu_assign_pointer(p, v)                \
+        ({                                      \
+                if (!__builtin_constant_p(v) || \
+                    ((v) != NULL))              \
+                        wmb();                  \
+                (p) = (v);                      \
+        })
+
 extern void *urcu_publish_content(void **ptr, void *new);
+extern void synchronize_rcu(void);
 
 /*
  * Reader thread registration.
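A minimal usage sketch of the API this patch exposes (not part of the diff itself): readers bracket their pointer dereferences with rcu_read_lock()/rcu_read_unlock(), and a writer publishes a replacement structure through urcu_publish_content(), which is expected to swap in the new pointer, wait for a grace period, and return the old pointer so it can be freed. The rcu_dereference() macro and the urcu_register_thread()/urcu_unregister_thread() registration calls are assumed to be declared elsewhere in urcu.h; the struct config type and the thread function are illustrative only.

#include <stdlib.h>
#include "urcu.h"

struct config {
        int threshold;
};

static struct config *shared_config;    /* read under RCU, replaced by the writer */

static void *reader_thread(void *arg)
{
        struct config *cfg;
        int i;

        (void) arg;
        urcu_register_thread();         /* assumed registration call, see "Reader thread registration" above */
        for (i = 0; i < 1000; i++) {
                rcu_read_lock();
                cfg = rcu_dereference(shared_config);   /* assumed to be defined in urcu.h */
                if (cfg)
                        (void) cfg->threshold;          /* use the snapshot only inside the critical section */
                rcu_read_unlock();
        }
        urcu_unregister_thread();
        return NULL;
}

static void update_config(int new_threshold)
{
        struct config *new_cfg, *old_cfg;

        new_cfg = malloc(sizeof(*new_cfg));
        if (!new_cfg)
                return;
        new_cfg->threshold = new_threshold;
        /*
         * urcu_publish_content() is expected to assign the new pointer,
         * wait for a grace period, and return the previous pointer,
         * which is then safe to free.
         */
        old_cfg = urcu_publish_content((void **) &shared_config, new_cfg);
        free(old_cfg);
}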