X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Furcu.h;h=9082af71a92c443dd0efe8be74d879c55dd56ddd;hp=c44c137278301a9b02e5b0c13693968c95e75d03;hb=67ecffc0f530a7b5c4dd5111ea7dd3213da8eb91;hpb=343c8b13592134185263cd6e30a615c8c4e35b6d

diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index c44c137..9082af7 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -46,7 +46,7 @@
 
 #ifdef __cplusplus
 extern "C" {
-#endif
+#endif
 
 /* Default is RCU_MEMBARRIER */
 #if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
@@ -81,9 +81,6 @@ enum rcu_state {
 };
 
 /*
- * RCU memory barrier broadcast group. Currently, only broadcast to all process
- * threads is supported (group 0).
- *
  * Slave barriers are only guaranteed to be ordered wrt master barriers.
  *
  * The pair ordering is detailed as (O: ordered, X: not ordered) :
@@ -92,13 +89,10 @@ enum rcu_state {
  * master   O      O
  */
 
-#define MB_GROUP_ALL		0
-#define RCU_MB_GROUP		MB_GROUP_ALL
-
 #ifdef RCU_MEMBARRIER
 extern int rcu_has_sys_membarrier;
 
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
 {
 	if (caa_likely(rcu_has_sys_membarrier))
 		cmm_barrier();
@@ -108,14 +102,14 @@ static inline void smp_mb_slave(int group)
 #endif
 
 #ifdef RCU_MB
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
 {
 	cmm_smp_mb();
 }
 #endif
 
 #ifdef RCU_SIGNAL
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
 {
 	cmm_barrier();
 }
@@ -202,7 +196,7 @@ static inline void _rcu_read_lock_update(unsigned long tmp)
 {
 	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
 		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
-		smp_mb_slave(RCU_MB_GROUP);
+		smp_mb_slave();
 	} else
 		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
 }
@@ -224,6 +218,7 @@ static inline void _rcu_read_lock(void)
 	urcu_assert(URCU_TLS(rcu_reader).registered);
 	cmm_barrier();
 	tmp = URCU_TLS(rcu_reader).ctr;
+	urcu_assert((tmp & RCU_GP_CTR_NEST_MASK) != RCU_GP_CTR_NEST_MASK);
 	_rcu_read_lock_update(tmp);
 }
 
@@ -238,9 +233,9 @@ static inline void _rcu_read_unlock_update_and_wakeup(unsigned long tmp)
 {
 	if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
-		smp_mb_slave(RCU_MB_GROUP);
+		smp_mb_slave();
 		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp - RCU_GP_COUNT);
-		smp_mb_slave(RCU_MB_GROUP);
+		smp_mb_slave();
 		wake_up_gp();
 	} else
 		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp - RCU_GP_COUNT);
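
For context on the read-side change: the new urcu_assert() in _rcu_read_lock() checks that the per-thread nesting field has not already saturated before it is incremented, and the smp_mb_slave() calls keep the barrier only on the outermost lock/unlock. Below is a minimal, self-contained sketch of that nesting-counter logic; GP_COUNT, GP_CTR_NEST_MASK and reader_ctr are simplified stand-ins for liburcu's RCU_GP_COUNT, RCU_GP_CTR_NEST_MASK and URCU_TLS(rcu_reader).ctr, so this is an illustration of the pattern, not the library implementation.

/* Sketch only: models the nesting counter behind _rcu_read_lock()/_rcu_read_unlock(). */
#include <assert.h>
#include <stdio.h>

#define GP_COUNT		1UL			/* one nesting level */
#define GP_CTR_NEST_MASK	((1UL << 16) - 1)	/* low bits hold the nesting depth */

static unsigned long reader_ctr;	/* stand-in for URCU_TLS(rcu_reader).ctr */

static void read_lock(void)
{
	unsigned long tmp = reader_ctr;

	/* The added assertion: the nesting field must not already be saturated. */
	assert((tmp & GP_CTR_NEST_MASK) != GP_CTR_NEST_MASK);

	if (!(tmp & GP_CTR_NEST_MASK)) {
		/* Outermost lock: the real code snapshots rcu_gp.ctr, then smp_mb_slave(). */
		reader_ctr = GP_COUNT;
	} else {
		/* Nested lock: only bump the nesting count, no barrier needed. */
		reader_ctr = tmp + GP_COUNT;
	}
}

static void read_unlock(void)
{
	unsigned long tmp = reader_ctr;

	if ((tmp & GP_CTR_NEST_MASK) == GP_COUNT) {
		/* Outermost unlock: real code wraps this decrement in smp_mb_slave()
		 * calls and then wakes up a waiting grace-period thread. */
		reader_ctr = tmp - GP_COUNT;
	} else {
		/* Nested unlock: decrement only. */
		reader_ctr = tmp - GP_COUNT;
	}
}

int main(void)
{
	read_lock();		/* outermost critical section */
	read_lock();		/* nested critical section */
	printf("nesting depth: %lu\n", reader_ctr & GP_CTR_NEST_MASK);	/* prints 2 */
	read_unlock();
	read_unlock();
	return 0;
}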