Cleanup: urcu: remove unused membarrier "group" parameter
author    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Fri, 18 Sep 2015 20:33:29 +0000 (16:33 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Fri, 18 Sep 2015 20:33:29 +0000 (16:33 -0400)
The "group" parameter was planned for in the 2010 implementation, but it
has never been used, and is not needed with the current kernel membarrier
implementation.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
urcu.c
urcu/static/urcu.h
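
For context, here is a minimal standalone sketch (not the liburcu sources;
mb_master(), has_sys_membarrier and the init-time query below are
illustrative) of the membarrier-based scheme this commit relies on:
MEMBARRIER_CMD_SHARED already broadcasts a memory barrier to every running
thread in the system, so there is no per-group variant for smp_mb_master()
to select and the "group" argument can go away. Assumes a Linux >= 4.3
kernel exposing membarrier(2).

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static int has_sys_membarrier;

static int membarrier(int cmd, int flags)
{
        return syscall(__NR_membarrier, cmd, flags);
}

/* Updater-side ("master") barrier: broadcast a full barrier to all
 * running threads when the kernel supports it, otherwise fall back to
 * a local full memory barrier. */
static void mb_master(void)
{
        if (has_sys_membarrier)
                (void) membarrier(MEMBARRIER_CMD_SHARED, 0);
        else
                __sync_synchronize();
}

int main(void)
{
        /* Query once at init which membarrier commands the kernel supports. */
        int mask = membarrier(MEMBARRIER_CMD_QUERY, 0);

        has_sys_membarrier = mask >= 0 && (mask & MEMBARRIER_CMD_SHARED);
        printf("sys_membarrier available: %d\n", has_sys_membarrier);
        mb_master();
        return 0;
}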

diff --git a/urcu.c b/urcu.c
index 4702ba9aea4c1bc55f180b658c28bc56c03dec96..1aa00fec9278929996fed4183ebe2acf40d5b61f 100644
--- a/urcu.c
+++ b/urcu.c
@@ -165,7 +165,7 @@ static void mutex_unlock(pthread_mutex_t *mutex)
 }
 
 #ifdef RCU_MEMBARRIER
-static void smp_mb_master(int group)
+static void smp_mb_master(void)
 {
        if (caa_likely(rcu_has_sys_membarrier))
                (void) membarrier(MEMBARRIER_CMD_SHARED, 0);
@@ -175,7 +175,7 @@ static void smp_mb_master(int group)
 #endif
 
 #ifdef RCU_MB
-static void smp_mb_master(int group)
+static void smp_mb_master(void)
 {
        cmm_smp_mb();
 }
@@ -224,7 +224,7 @@ static void force_mb_all_readers(void)
        cmm_smp_mb();   /* read ->need_mb before ending the barrier */
 }
 
-static void smp_mb_master(int group)
+static void smp_mb_master(void)
 {
        force_mb_all_readers();
 }
@@ -236,7 +236,7 @@ static void smp_mb_master(int group)
 static void wait_gp(void)
 {
        /* Read reader_gp before read futex */
-       smp_mb_master(RCU_MB_GROUP);
+       smp_mb_master();
        if (uatomic_read(&rcu_gp.futex) != -1)
                return;
        while (futex_async(&rcu_gp.futex, FUTEX_WAIT, -1,
@@ -280,7 +280,7 @@ static void wait_for_readers(struct cds_list_head *input_readers,
                if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
                        uatomic_dec(&rcu_gp.futex);
                        /* Write futex before read reader_gp */
-                       smp_mb_master(RCU_MB_GROUP);
+                       smp_mb_master();
                }
 
                cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
@@ -310,7 +310,7 @@ static void wait_for_readers(struct cds_list_head *input_readers,
                if (cds_list_empty(input_readers)) {
                        if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
                                /* Read reader_gp before write futex */
-                               smp_mb_master(RCU_MB_GROUP);
+                               smp_mb_master();
                                uatomic_set(&rcu_gp.futex, 0);
                        }
                        break;
@@ -333,13 +333,13 @@ static void wait_for_readers(struct cds_list_head *input_readers,
                if (cds_list_empty(input_readers)) {
                        if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
                                /* Read reader_gp before write futex */
-                               smp_mb_master(RCU_MB_GROUP);
+                               smp_mb_master();
                                uatomic_set(&rcu_gp.futex, 0);
                        }
                        break;
                } else {
                        if (wait_gp_loops == KICK_READER_LOOPS) {
-                               smp_mb_master(RCU_MB_GROUP);
+                               smp_mb_master();
                                wait_gp_loops = 0;
                        }
                        /* Temporarily unlock the registry lock. */
@@ -400,7 +400,7 @@ void synchronize_rcu(void)
         * because it iterates on reader threads.
         */
        /* Write new ptr before changing the qparity */
-       smp_mb_master(RCU_MB_GROUP);
+       smp_mb_master();
 
        /*
         * Wait for readers to observe original parity or be quiescent.
@@ -461,7 +461,7 @@ void synchronize_rcu(void)
         * being freed. Must be done within rcu_registry_lock because it
         * iterates on reader threads.
         */
-       smp_mb_master(RCU_MB_GROUP);
+       smp_mb_master();
 out:
        mutex_unlock(&rcu_registry_lock);
        mutex_unlock(&rcu_gp_lock);
diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index 3fb457b4e3e00916b2a2e06e172b4eced08f0df0..92185d7629a72f2ceb70d109f24bd959e062529d 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -81,9 +81,6 @@ enum rcu_state {
 };
 
 /*
- * RCU memory barrier broadcast group. Currently, only broadcast to all process
- * threads is supported (group 0).
- *
  * Slave barriers are only guaranteed to be ordered wrt master barriers.
  *
  * The pair ordering is detailed as (O: ordered, X: not ordered) :
@@ -92,13 +89,10 @@ enum rcu_state {
  *        master   O      O
  */
 
-#define MB_GROUP_ALL           0
-#define RCU_MB_GROUP           MB_GROUP_ALL
-
 #ifdef RCU_MEMBARRIER
 extern int rcu_has_sys_membarrier;
 
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
 {
        if (caa_likely(rcu_has_sys_membarrier))
                cmm_barrier();
@@ -108,14 +102,14 @@ static inline void smp_mb_slave(int group)
 #endif
 
 #ifdef RCU_MB
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
 {
        cmm_smp_mb();
 }
 #endif
 
 #ifdef RCU_SIGNAL
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
 {
        cmm_barrier();
 }
@@ -202,7 +196,7 @@ static inline void _rcu_read_lock_update(unsigned long tmp)
 {
        if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
                _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
-               smp_mb_slave(RCU_MB_GROUP);
+               smp_mb_slave();
        } else
                _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
 }
@@ -239,9 +233,9 @@ static inline void _rcu_read_lock(void)
 static inline void _rcu_read_unlock_update_and_wakeup(unsigned long tmp)
 {
        if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
-               smp_mb_slave(RCU_MB_GROUP);
+               smp_mb_slave();
                _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp - RCU_GP_COUNT);
-               smp_mb_slave(RCU_MB_GROUP);
+               smp_mb_slave();
                wake_up_gp();
        } else
                _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp - RCU_GP_COUNT);
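
The reader-side counterpart, per the pair-ordering comment kept in
urcu/static/urcu.h above: slave barriers only need to be ordered with
respect to master barriers, so when sys_membarrier is available the
read-side fast path can use a compiler-only barrier and let
smp_mb_master() promote it to a full barrier on every CPU. A hedged
sketch in C11 atomics (mb_slave, reader_ctr and reader_lock are
illustrative names, not liburcu identifiers):

#include <stdatomic.h>

extern int has_sys_membarrier;  /* detected once at init, as above */

static _Thread_local _Atomic unsigned long reader_ctr;

/* Reader-side ("slave") barrier: a compiler fence suffices when the
 * grace-period side issues membarrier(2); otherwise emit a real fence. */
static inline void mb_slave(void)
{
        if (has_sys_membarrier)
                atomic_signal_fence(memory_order_seq_cst);
        else
                atomic_thread_fence(memory_order_seq_cst);
}

/* Read-lock fast path: publish this thread's snapshot of the global
 * grace-period counter, then order that store before the critical
 * section -- usually for free, since mb_slave() compiles to nothing
 * but an optimization barrier. */
static inline void reader_lock(unsigned long gp_ctr)
{
        atomic_store_explicit(&reader_ctr, gp_ctr, memory_order_relaxed);
        mb_slave();
}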