X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-static.h;h=d46613196f5d471b21e584a85c0eaa7f60154e18;hp=2e7371ef5000194c2d302803c7ba4a359a64c7d3;hb=9d7e3f89772f08cca26d727f47d44ecd47c94401;hpb=48d848c7291ec22da6258ac03cd9c1dafec1fdfb

diff --git a/urcu-static.h b/urcu-static.h
index 2e7371e..d466131 100644
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -39,10 +39,11 @@
 #include
 #include
 #include
+#include
 
-#define futex(...) syscall(__NR_futex, __VA_ARGS__)
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
+#ifdef __cplusplus
+extern "C" {
+#endif
 
 /*
  * This code section can only be included in LGPL 2.1 compatible source code.
@@ -54,15 +55,15 @@
 
 /*
  * The signal number used by the RCU library can be overridden with
- * -DSIGURCU= when compiling the library.
+ * -DSIGRCU= when compiling the library.
  */
-#ifndef SIGURCU
-#define SIGURCU SIGUSR1
+#ifndef SIGRCU
+#define SIGRCU SIGUSR1
 #endif
 
 /*
  * If a reader is really non-cooperative and refuses to commit its
- * urcu_active_readers count to memory (there is no barrier in the reader
+ * rcu_active_readers count to memory (there is no barrier in the reader
  * per-se), kick it after a few loops waiting for it.
  */
 #define KICK_READER_LOOPS 10000
@@ -88,10 +89,10 @@
 #define YIELD_WRITE (1 << 1)
 
 /*
- * Updates without URCU_MB are much slower. Account this in
+ * Updates without RCU_MB are much slower. Account this in
  * the delay.
  */
-#ifdef URCU_MB
+#ifdef RCU_MB
 /* maximum sleep delay, in us */
 #define MAX_SLEEP 50
 #else
@@ -134,35 +135,35 @@ static inline void debug_yield_init(void)
 }
 #endif
 
-#ifdef URCU_MB
-static inline void reader_barrier()
+#ifdef RCU_MB
+static inline void smp_mb_light()
 {
 	smp_mb();
 }
 #else
-static inline void reader_barrier()
+static inline void smp_mb_light()
 {
 	barrier();
 }
 #endif
 
 /*
- * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
- * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
+ * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use
+ * a full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
  */
 #define RCU_GP_COUNT (1UL << 0)
 /* Use the amount of bits equal to half of the architecture long size */
-#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
-#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+#define RCU_GP_CTR_PHASE (1UL << (sizeof(long) << 2))
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_PHASE - 1)
 
 /*
  * Global quiescent period counter with low-order bits unused.
  * Using a int rather than a char to eliminate false register dependencies
  * causing stalls on some architectures.
  */
-extern long urcu_gp_ctr;
+extern long rcu_gp_ctr;
 
-struct urcu_reader {
+struct rcu_reader {
 	/* Data used by both reader and synchronize_rcu() */
 	long ctr;
 	char need_mb;
@@ -171,7 +172,7 @@ struct urcu_reader {
 	pthread_t tid;
 };
 
-extern struct urcu_reader __thread urcu_reader;
+extern struct rcu_reader __thread rcu_reader;
 
 extern int gp_futex;
 
@@ -182,7 +183,7 @@ static inline void wake_up_gp(void)
 {
 	if (unlikely(uatomic_read(&gp_futex) == -1)) {
 		uatomic_set(&gp_futex, 0);
-		futex(&gp_futex, FUTEX_WAKE, 1,
+		futex_async(&gp_futex, FUTEX_WAKE, 1,
 		      NULL, NULL, 0);
 	}
 }
@@ -199,24 +200,27 @@ static inline int rcu_old_gp_ongoing(long *value)
 	 */
 	v = LOAD_SHARED(*value);
 	return (v & RCU_GP_CTR_NEST_MASK) &&
-		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
+		((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 }
 
 static inline void _rcu_read_lock(void)
 {
 	long tmp;
 
-	tmp = urcu_reader.ctr;
-	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+	tmp = rcu_reader.ctr;
+	/*
+	 * rcu_gp_ctr is
+	 * RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
+	 */
 	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-		_STORE_SHARED(urcu_reader.ctr, _LOAD_SHARED(urcu_gp_ctr));
+		_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
 		/*
 		 * Set active readers count for outermost nesting level before
-		 * accessing the pointer. See force_mb_all_threads().
+		 * accessing the pointer. See smp_mb_heavy().
 		 */
-		reader_barrier();
+		smp_mb_light();
 	} else {
-		_STORE_SHARED(urcu_reader.ctr, tmp + RCU_GP_COUNT);
+		_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
 	}
 }
 
@@ -224,20 +228,24 @@ static inline void _rcu_read_unlock(void)
 {
 	long tmp;
 
-	tmp = urcu_reader.ctr;
+	tmp = rcu_reader.ctr;
 	/*
 	 * Finish using rcu before decrementing the pointer.
-	 * See force_mb_all_threads().
+	 * See smp_mb_heavy().
 	 */
 	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
-		reader_barrier();
-		_STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
-		/* write urcu_reader.ctr before read futex */
-		reader_barrier();
+		smp_mb_light();
+		_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+		/* write rcu_reader.ctr before read futex */
+		smp_mb_light();
 		wake_up_gp();
 	} else {
-		_STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
+		_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
 	}
 }
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _URCU_STATIC_H */
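
Note on the counter layout touched by the RCU_GP_CTR_BIT -> RCU_GP_CTR_PHASE rename: the low half of a long holds the read-side nesting count, and the single bit at position sizeof(long)*4 carries the grace-period phase. The standalone sketch below is not part of the patch; it only reuses the three macro definitions shown in the diff to reproduce the rcu_old_gp_ongoing() test in isolation (printed values assume a 64-bit long).

#include <stdio.h>

/* Same definitions as in urcu-static.h after this patch. */
#define RCU_GP_COUNT		(1UL << 0)
#define RCU_GP_CTR_PHASE	(1UL << (sizeof(long) << 2))	/* bit 32 on LP64 */
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)		/* low 32 bits on LP64 */

int main(void)
{
	unsigned long gp_ctr = RCU_GP_CTR_PHASE;	/* global counter after one phase flip */
	unsigned long reader_ctr = RCU_GP_COUNT;	/* reader snapshot taken in the old phase */

	/* Same test as rcu_old_gp_ongoing(): reader active AND still in the old phase. */
	int ongoing = (reader_ctr & RCU_GP_CTR_NEST_MASK) &&
		((reader_ctr ^ gp_ctr) & RCU_GP_CTR_PHASE);

	printf("nest mask = 0x%lx, phase bit = 0x%lx, old gp ongoing = %d\n",
	       RCU_GP_CTR_NEST_MASK, RCU_GP_CTR_PHASE, ongoing);
	return 0;
}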
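
For reference, a minimal reader/updater sketch against the public liburcu API that this static header backs. It assumes the post-rename API of this era: <urcu.h> as the public header, rcu_register_thread()/rcu_unregister_thread() for reader registration, and -D_LGPL_SOURCE mapping rcu_read_lock()/rcu_read_unlock() onto the inline _rcu_read_lock()/_rcu_read_unlock() shown above. Exact names and headers may differ between liburcu revisions; treat this as an illustrative demo, not part of the patch.

/* Hypothetical demo; link with -lurcu. */
#define _LGPL_SOURCE	/* pull in the inline fast paths from urcu-static.h */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct cfg {
	int value;
};

static struct cfg *shared_cfg;	/* RCU-protected pointer */

static void reader(void)
{
	struct cfg *c;

	rcu_register_thread();	/* every reader thread registers itself */

	rcu_read_lock();	/* outermost lock snapshots rcu_gp_ctr */
	c = rcu_dereference(shared_cfg);
	if (c)
		printf("value = %d\n", c->value);
	rcu_read_unlock();	/* outermost unlock may wake a futex-waiting updater */

	rcu_unregister_thread();
}

static void update(int newval)
{
	struct cfg *newc, *oldc;

	newc = malloc(sizeof(*newc));
	newc->value = newval;

	oldc = shared_cfg;
	rcu_assign_pointer(shared_cfg, newc);	/* publish the new version */
	synchronize_rcu();			/* wait for pre-existing readers */
	free(oldc);				/* safe: no reader can still see oldc */
}

int main(void)
{
	update(42);
	reader();
	return 0;
}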