From: Mathieu Desnoyers
Date: Fri, 18 Sep 2015 20:34:53 +0000 (-0400)
Subject: urcu-bp: use sys_membarrier when available
X-Git-Tag: v0.9.0~34
X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=commitdiff_plain;h=f541831e757bc5e0143550f5d686bed0de7d41b7

urcu-bp: use sys_membarrier when available

Signed-off-by: Mathieu Desnoyers
---

diff --git a/urcu-bp.c b/urcu-bp.c
index 4dc4028..6b78af8 100644
--- a/urcu-bp.c
+++ b/urcu-bp.c
@@ -94,11 +94,32 @@ void *mremap_wrapper(void *old_address, size_t old_size,
 static
 int rcu_bp_refcount;
 
+/*
+ * RCU_MEMBARRIER is only possibly available on Linux.
+ */
+#ifdef __linux__
+#include
+#endif
+
+/* If the headers do not support SYS_membarrier, fall back on RCU_MB */
+#ifdef SYS_membarrier
+# define membarrier(...)		syscall(SYS_membarrier, __VA_ARGS__)
+#else
+# define membarrier(...)		-ENOSYS
+#endif
+
+enum membarrier_cmd {
+	MEMBARRIER_CMD_QUERY = 0,
+	MEMBARRIER_CMD_SHARED = (1 << 0),
+};
+
 static
 void __attribute__((constructor)) rcu_bp_init(void);
 static
 void __attribute__((destructor)) rcu_bp_exit(void);
 
+int urcu_bp_has_sys_membarrier;
+
 /*
  * rcu_gp_lock ensures mutual exclusion between threads calling
  * synchronize_rcu().
@@ -174,6 +195,14 @@ static void mutex_unlock(pthread_mutex_t *mutex)
 		urcu_die(ret);
 }
 
+static void smp_mb_master(void)
+{
+	if (caa_likely(urcu_bp_has_sys_membarrier))
+		(void) membarrier(MEMBARRIER_CMD_SHARED, 0);
+	else
+		cmm_smp_mb();
+}
+
 /*
  * Always called with rcu_registry lock held. Releases this lock between
  * iterations and grabs it again. Holds the lock when it returns.
@@ -254,7 +283,7 @@ void synchronize_rcu(void)
 	/* All threads should read qparity before accessing data structure
 	 * where new ptr points to. */
 	/* Write new ptr before changing the qparity */
-	cmm_smp_mb();
+	smp_mb_master();
 
 	/*
 	 * Wait for readers to observe original parity or be quiescent.
@@ -303,7 +332,7 @@ void synchronize_rcu(void)
 	 * Finish waiting for reader threads before letting the old ptr being
 	 * freed.
 	 */
-	cmm_smp_mb();
+	smp_mb_master();
 out:
 	mutex_unlock(&rcu_registry_lock);
 	mutex_unlock(&rcu_gp_lock);
@@ -567,6 +596,10 @@ void rcu_bp_init(void)
 				urcu_bp_thread_exit_notifier);
 		if (ret)
 			abort();
+		ret = membarrier(MEMBARRIER_CMD_QUERY, 0);
+		if (ret >= 0 && (ret & MEMBARRIER_CMD_SHARED)) {
+			urcu_bp_has_sys_membarrier = 1;
+		}
 		initialized = 1;
 	}
 	mutex_unlock(&init_lock);
diff --git a/urcu/static/urcu-bp.h b/urcu/static/urcu-bp.h
index df25393..182e5fe 100644
--- a/urcu/static/urcu-bp.h
+++ b/urcu/static/urcu-bp.h
@@ -102,6 +102,16 @@ struct rcu_reader {
  */
 extern DECLARE_URCU_TLS(struct rcu_reader *, rcu_reader);
 
+extern int urcu_bp_has_sys_membarrier;
+
+static inline void urcu_bp_smp_mb_slave(void)
+{
+	if (caa_likely(urcu_bp_has_sys_membarrier))
+		cmm_barrier();
+	else
+		cmm_smp_mb();
+}
+
 static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
 {
 	unsigned long v;
@@ -131,7 +141,7 @@ static inline void _rcu_read_lock_update(unsigned long tmp)
 {
 	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
 		_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
-		cmm_smp_mb();
+		urcu_bp_smp_mb_slave();
 	} else
 		_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp + RCU_GP_COUNT);
 }
@@ -170,7 +180,7 @@ static inline void _rcu_read_unlock(void)
 	tmp = URCU_TLS(rcu_reader)->ctr;
 	urcu_assert(tmp & RCU_GP_CTR_NEST_MASK);
 	/* Finish using rcu before decrementing the pointer. */
-	cmm_smp_mb();
+	urcu_bp_smp_mb_slave();
 	_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp - RCU_GP_COUNT);
 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
 }
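
The point of the patch is the asymmetric barrier pairing: when
sys_membarrier(MEMBARRIER_CMD_SHARED) is available, the grace-period
writer issues one system-wide barrier in smp_mb_master(), which forces a
full memory barrier on every running thread, so the read-side path in
urcu_bp_smp_mb_slave() only needs a compiler barrier; without the
syscall, both sides fall back to cmm_smp_mb(). The following is a
minimal standalone sketch of that detect-then-pair pattern, not liburcu
code: barrier_init(), writer_barrier(), reader_barrier() and
has_sys_membarrier are illustrative names, and __sync_synchronize()
stands in for cmm_smp_mb().

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <errno.h>

#ifdef __linux__
#include <sys/syscall.h>
#endif

/* Same fallback scheme as the patch: if the headers lack SYS_membarrier,
 * the query below simply reports "not available". */
#ifdef SYS_membarrier
#define membarrier(...)	syscall(SYS_membarrier, __VA_ARGS__)
#else
#define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY	= 0,
	MEMBARRIER_CMD_SHARED	= (1 << 0),
};

static int has_sys_membarrier;	/* detected once at initialization */

static void barrier_init(void)
{
	/* QUERY returns a bitmask of supported commands, or -1 with errno
	 * set to ENOSYS on kernels without the syscall. */
	int ret = membarrier(MEMBARRIER_CMD_QUERY, 0);

	if (ret >= 0 && (ret & MEMBARRIER_CMD_SHARED))
		has_sys_membarrier = 1;
}

/* Writer side (cf. smp_mb_master): one expensive system-wide barrier
 * promotes every reader's compiler barrier to a full memory barrier. */
static void writer_barrier(void)
{
	if (has_sys_membarrier)
		(void) membarrier(MEMBARRIER_CMD_SHARED, 0);
	else
		__sync_synchronize();	/* stand-in for cmm_smp_mb() */
}

/* Reader side (cf. urcu_bp_smp_mb_slave): a compiler barrier is enough
 * when the writer uses sys_membarrier. */
static void reader_barrier(void)
{
	if (has_sys_membarrier)
		__asm__ __volatile__ ("" : : : "memory");
	else
		__sync_synchronize();
}

int main(void)
{
	barrier_init();
	printf("sys_membarrier(SHARED) available: %s\n",
		has_sys_membarrier ? "yes" : "no");
	writer_barrier();
	reader_barrier();
	return 0;
}

On a kernel without the membarrier syscall (it was merged in Linux 4.3),
the query fails and both sides degrade to the plain memory barrier,
matching the RCU_MB-style fallback kept by the patch.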