X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=urcu.h;h=27695d495aeeb6388b121a099183949cbbdbac06;hb=e462817ecdce42b00f0cb0825e2425b08943105b;hp=3c5b178d1a8254adb37331fea6b61698a58c459c;hpb=5e7e64b952ac49cf61c4c3bfc02e5938c3691d94;p=urcu.git

diff --git a/urcu.h b/urcu.h
index 3c5b178..27695d4 100644
--- a/urcu.h
+++ b/urcu.h
@@ -17,6 +17,8 @@
  * Distributed under GPLv2
  */
 
+#include
+
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
@@ -66,51 +68,130 @@ static inline void atomic_inc(int *v)
 
 #define SIGURCU SIGUSR1
 
-/* Global quiescent period parity */
-extern int urcu_qparity;
+#ifdef DEBUG_YIELD
+#include
+
+#define YIELD_READ	(1 << 0)
+#define YIELD_WRITE	(1 << 1)
+
+extern unsigned int yield_active;
+extern unsigned int __thread rand_yield;
+
+static inline void debug_yield_read(void)
+{
+	if (yield_active & YIELD_READ)
+		if (rand_r(&rand_yield) & 0x1)
+			sched_yield();
+}
 
-extern int __thread urcu_active_readers[2];
+static inline void debug_yield_write(void)
+{
+	if (yield_active & YIELD_WRITE)
+		if (rand_r(&rand_yield) & 0x1)
+			sched_yield();
+}
 
-static inline int get_urcu_qparity(void)
+static inline void debug_yield_init(void)
 {
-	return urcu_qparity;
+	rand_yield = time(NULL) ^ pthread_self();
 }
+#else
+static inline void debug_yield_read(void)
+{
+}
+
+static inline void debug_yield_write(void)
+{
+}
+
+static inline void debug_yield_init(void)
+{
+
+}
+#endif
 
 /*
- * returns urcu_parity.
+ * Limiting the nesting level to 256 to keep instructions small in the read
+ * fast-path.
  */
-static inline int rcu_read_lock(void)
+#define RCU_GP_COUNT		(1U << 0)
+#define RCU_GP_CTR_BIT		(1U << 8)
+#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
+
+/* Global quiescent period counter with low-order bits unused. */
+extern int urcu_gp_ctr;
+
+extern int __thread urcu_active_readers;
+
+static inline int rcu_old_gp_ongoing(int *value)
+{
+	int v;
+
+	if (value == NULL)
+		return 0;
+	debug_yield_write();
+	v = ACCESS_ONCE(*value);
+	debug_yield_write();
+	return (v & RCU_GP_CTR_NEST_MASK) &&
+		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
+}
+
+static inline void rcu_read_lock(void)
 {
-	int urcu_parity = get_urcu_qparity();
-	urcu_active_readers[urcu_parity]++;
+	int tmp;
+
+	debug_yield_read();
+	tmp = urcu_active_readers;
+	debug_yield_read();
+	if (!(tmp & RCU_GP_CTR_NEST_MASK))
+		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
+	else
+		urcu_active_readers = tmp + RCU_GP_COUNT;
+	debug_yield_read();
 	/*
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
 	 */
 	barrier();
-	return urcu_parity;
+	debug_yield_read();
 }
 
-static inline void rcu_read_unlock(int urcu_parity)
+static inline void rcu_read_unlock(void)
 {
+	debug_yield_read();
 	barrier();
+	debug_yield_read();
 	/*
 	 * Finish using rcu before decrementing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	urcu_active_readers[urcu_parity]--;
+	urcu_active_readers -= RCU_GP_COUNT;
+	debug_yield_read();
 }
 
-extern void rcu_write_lock(void);
-extern void rcu_write_unlock(void);
+/**
+ * rcu_assign_pointer - assign (publicize) a pointer to a newly
+ * initialized structure that will be dereferenced by RCU read-side
+ * critical sections. Returns the value assigned.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (pretty much all of them other than x86), and also prevents
+ * the compiler from reordering the code that initializes the
+ * structure after the pointer assignment. More importantly, this
+ * call documents which pointers will be dereferenced by RCU read-side
+ * code.
+ */
 
-extern void *_urcu_publish_content(void **ptr, void *new);
+#define rcu_assign_pointer(p, v)		\
+	({					\
+		if (!__builtin_constant_p(v) ||	\
+		    ((v) != NULL))		\
+			wmb();			\
+		(p) = (v);			\
+	})
 
-/*
- * gcc does not like automatic &struct ... * -> void **.
- * Remove the warning. (hopefully this is ok)
- */
-#define urcu_publish_content(ptr, new) _urcu_publish_content((void **)ptr, new)
+extern void *urcu_publish_content(void **ptr, void *new);
+extern void synchronize_rcu(void);
 
 /*
  * Reader thread registration.
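For context, a minimal usage sketch of the read-side and publication primitives this patch introduces. It is not part of the patch: only rcu_read_lock(), rcu_read_unlock(), rcu_assign_pointer(), synchronize_rcu() and ACCESS_ONCE() come from the header above; the struct, variables and helper functions are illustrative assumptions, reader threads are assumed to have gone through the reader thread registration API the header continues with, and a single writer (or external writer serialization) is assumed.

#include <stdlib.h>
#include "urcu.h"

struct config {
	int value;
};

static struct config *shared_config;	/* pointer published to RCU readers */

/* Read side: lock-free fast path, nestable thanks to RCU_GP_CTR_NEST_MASK. */
static int read_value(void)
{
	struct config *c;
	int v;

	rcu_read_lock();
	c = ACCESS_ONCE(shared_config);	/* snapshot the published pointer */
	v = c ? c->value : -1;
	rcu_read_unlock();
	return v;
}

/* Write side: publish a new version, wait for a grace period, then reclaim. */
static void update_value(int value)
{
	struct config *new_config, *old_config;

	new_config = malloc(sizeof(*new_config));
	new_config->value = value;
	old_config = shared_config;
	rcu_assign_pointer(shared_config, new_config);	/* wmb() before the pointer becomes visible */
	synchronize_rcu();	/* wait until no reader can still hold old_config */
	free(old_config);
}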