X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-static.h;h=d46613196f5d471b21e584a85c0eaa7f60154e18;hp=97d94dc0b8817b5a87e83fafb3de9d3c6bd011b1;hb=9d7e3f89772f08cca26d727f47d44ecd47c94401;hpb=0854ccff976f2893a7f04aaa4b48090f423802a5

diff --git a/urcu-static.h b/urcu-static.h
index 97d94dc..d466131 100644
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -41,6 +41,10 @@
 #include
 #include
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * This code section can only be included in LGPL 2.1 compatible source code.
  * See below for the function call wrappers which can be used in code meant to
@@ -51,15 +55,15 @@
 
 /*
  * The signal number used by the RCU library can be overridden with
- * -DSIGURCU= when compiling the library.
+ * -DSIGRCU= when compiling the library.
  */
-#ifndef SIGURCU
-#define SIGURCU SIGUSR1
+#ifndef SIGRCU
+#define SIGRCU SIGUSR1
 #endif
 
 /*
  * If a reader is really non-cooperative and refuses to commit its
- * urcu_active_readers count to memory (there is no barrier in the reader
+ * rcu_active_readers count to memory (there is no barrier in the reader
  * per-se), kick it after a few loops waiting for it.
  */
 #define KICK_READER_LOOPS 10000
@@ -85,10 +89,10 @@
 #define YIELD_WRITE	(1 << 1)
 
 /*
- * Updates without URCU_MB are much slower. Account this in
+ * Updates without RCU_MB are much slower. Account this in
  * the delay.
  */
-#ifdef URCU_MB
+#ifdef RCU_MB
 /* maximum sleep delay, in us */
 #define MAX_SLEEP 50
 #else
@@ -131,35 +135,35 @@ static inline void debug_yield_init(void)
 }
 #endif
 
-#ifdef URCU_MB
-static inline void reader_barrier()
+#ifdef RCU_MB
+static inline void smp_mb_light()
 {
 	smp_mb();
 }
 #else
-static inline void reader_barrier()
+static inline void smp_mb_light()
 {
 	barrier();
 }
 #endif
 
 /*
- * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
- * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
+ * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use
+ * a full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
  */
 #define RCU_GP_COUNT		(1UL << 0)
 /* Use the amount of bits equal to half of the architecture long size */
-#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
-#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
+#define RCU_GP_CTR_PHASE	(1UL << (sizeof(long) << 2))
+#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)
 
 /*
  * Global quiescent period counter with low-order bits unused.
  * Using a int rather than a char to eliminate false register dependencies
  * causing stalls on some architectures.
  */
-extern long urcu_gp_ctr;
+extern long rcu_gp_ctr;
 
-struct urcu_reader {
+struct rcu_reader {
 	/* Data used by both reader and synchronize_rcu() */
 	long ctr;
 	char need_mb;
@@ -168,7 +172,7 @@ struct urcu_reader {
 	pthread_t tid;
 };
 
-extern struct urcu_reader __thread urcu_reader;
+extern struct rcu_reader __thread rcu_reader;
 
 extern int gp_futex;
 
@@ -196,24 +200,27 @@ static inline int rcu_old_gp_ongoing(long *value)
 	 */
 	v = LOAD_SHARED(*value);
 	return (v & RCU_GP_CTR_NEST_MASK) &&
-		 ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
+		 ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 }
 
 static inline void _rcu_read_lock(void)
 {
 	long tmp;
 
-	tmp = urcu_reader.ctr;
-	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+	tmp = rcu_reader.ctr;
+	/*
+	 * rcu_gp_ctr is
+	 *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
+	 */
 	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-		_STORE_SHARED(urcu_reader.ctr, _LOAD_SHARED(urcu_gp_ctr));
+		_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
 		/*
 		 * Set active readers count for outermost nesting level before
-		 * accessing the pointer. See force_mb_all_threads().
+		 * accessing the pointer. See smp_mb_heavy().
 		 */
-		reader_barrier();
+		smp_mb_light();
 	} else {
-		_STORE_SHARED(urcu_reader.ctr, tmp + RCU_GP_COUNT);
+		_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
 	}
 }
 
@@ -221,20 +228,24 @@ static inline void _rcu_read_unlock(void)
 {
 	long tmp;
 
-	tmp = urcu_reader.ctr;
+	tmp = rcu_reader.ctr;
 	/*
 	 * Finish using rcu before decrementing the pointer.
-	 * See force_mb_all_threads().
+	 * See smp_mb_heavy().
 	 */
 	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
-		reader_barrier();
-		_STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
-		/* write urcu_reader.ctr before read futex */
-		reader_barrier();
+		smp_mb_light();
+		_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+		/* write rcu_reader.ctr before read futex */
+		smp_mb_light();
 		wake_up_gp();
 	} else {
-		_STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
+		_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
 	}
 }
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _URCU_STATIC_H */
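
For context, below is a minimal usage sketch, not part of the patch above, showing how a reader and an updater would typically exercise these primitives through the public liburcu wrappers. It assumes the urcu.h wrappers follow the same rcu_-prefixed spellings as the renamed internals (rcu_register_thread(), rcu_read_lock(), synchronize_rcu(), ...); the struct shared_cfg type, the cfg pointer and update_cfg() are hypothetical names invented for illustration.

/*
 * Usage sketch only -- not part of urcu-static.h.  Names below
 * (struct shared_cfg, cfg, update_cfg) are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

#include <urcu.h>

struct shared_cfg {
	int threshold;
};

static struct shared_cfg *cfg;		/* RCU-protected pointer */

static void *reader_thread(void *arg)
{
	struct shared_cfg *c;

	(void) arg;
	rcu_register_thread();		/* register this thread with the RCU library */

	rcu_read_lock();		/* outermost lock: snapshots rcu_gp_ctr */
	c = rcu_dereference(cfg);	/* c stays valid until rcu_read_unlock() */
	if (c)
		(void) c->threshold;
	rcu_read_unlock();		/* may wake up a waiting synchronize_rcu() */

	rcu_unregister_thread();
	return NULL;
}

/* A single updater is assumed, so no lock is taken around cfg here. */
static void update_cfg(int new_threshold)
{
	struct shared_cfg *new_cfg, *old_cfg;

	new_cfg = malloc(sizeof(*new_cfg));
	if (!new_cfg)
		return;
	new_cfg->threshold = new_threshold;

	old_cfg = cfg;
	rcu_assign_pointer(cfg, new_cfg);	/* publish the new version */
	synchronize_rcu();			/* wait for pre-existing readers */
	free(old_cfg);				/* no reader can still hold old_cfg */
}

The nesting mask kept in the low-order half of rcu_reader.ctr is what lets rcu_read_lock() nest: only the outermost lock snapshots rcu_gp_ctr and issues smp_mb_light(), while nested calls merely add RCU_GP_COUNT. smp_mb_light() is a full smp_mb() only when the library is built with -DRCU_MB; otherwise it is a plain compiler barrier and the required ordering is forced from the update side, as the "See smp_mb_heavy()" comments above indicate.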