urcu signal-based renames
author: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Wed, 13 Jan 2010 16:59:00 +0000 (11:59 -0500)
committer: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Wed, 13 Jan 2010 16:59:00 +0000 (11:59 -0500)
reader_barrier renamed to smp_mb_light
force_mb_all_readers renamed to smp_mb_heavy

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
urcu-static.h
urcu.c

diff --git a/urcu-static.h b/urcu-static.h
index 0708df7c344038682595a39b3eb0fe977200d859..d46613196f5d471b21e584a85c0eaa7f60154e18 100644 (file)
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -136,12 +136,12 @@ static inline void debug_yield_init(void)
 #endif
 
 #ifdef RCU_MB
-static inline void reader_barrier()
+static inline void smp_mb_light()
 {
        smp_mb();
 }
 #else
-static inline void reader_barrier()
+static inline void smp_mb_light()
 {
        barrier();
 }
@@ -216,9 +216,9 @@ static inline void _rcu_read_lock(void)
                _STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
                /*
                 * Set active readers count for outermost nesting level before
-                * accessing the pointer. See force_mb_all_readers().
+                * accessing the pointer. See smp_mb_heavy().
                 */
-               reader_barrier();
+               smp_mb_light();
        } else {
                _STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
        }
@@ -231,13 +231,13 @@ static inline void _rcu_read_unlock(void)
        tmp = rcu_reader.ctr;
        /*
         * Finish using rcu before decrementing the pointer.
-        * See force_mb_all_readers().
+        * See smp_mb_heavy().
         */
        if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
-               reader_barrier();
+               smp_mb_light();
                _STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
                /* write rcu_reader.ctr before read futex */
-               reader_barrier();
+               smp_mb_light();
                wake_up_gp();
        } else {
                _STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
diff --git a/urcu.c b/urcu.c
index 3132e59277bf782f9e2e03cb3b0f862981b80a2a..53dfbd5286e4ea5a02044fe23a17425fe738cc4f 100644 (file)
--- a/urcu.c
+++ b/urcu.c
@@ -119,43 +119,11 @@ static void switch_next_rcu_qparity(void)
 }
 
 #ifdef RCU_MB
-#if 0 /* unused */
-static void force_mb_single_thread(struct rcu_reader *index)
+static void smp_mb_heavy()
 {
        smp_mb();
 }
-#endif //0
-
-static void force_mb_all_readers(void)
-{
-       smp_mb();
-}
-#else /* #ifdef RCU_MB */
-#if 0 /* unused */
-static void force_mb_single_thread(struct rcu_reader *index)
-{
-       assert(!list_empty(&registry));
-       /*
-        * pthread_kill has a smp_mb(). But beware, we assume it performs
-        * a cache flush on architectures with non-coherent cache. Let's play
-        * safe and don't assume anything : we use smp_mc() to make sure the
-        * cache flush is enforced.
-        */
-       index->need_mb = 1;
-       smp_mc();       /* write ->need_mb before sending the signals */
-       pthread_kill(index->tid, SIGRCU);
-       smp_mb();
-       /*
-        * Wait for sighandler (and thus mb()) to execute on every thread.
-        * BUSY-LOOP.
-        */
-       while (index->need_mb) {
-               poll(NULL, 0, 1);
-       }
-       smp_mb();       /* read ->need_mb before ending the barrier */
-}
-#endif //0
-
+#else
 static void force_mb_all_readers(void)
 {
        struct rcu_reader *index;
@@ -198,6 +166,11 @@ static void force_mb_all_readers(void)
        }
        smp_mb();       /* read ->need_mb before ending the barrier */
 }
+
+static void smp_mb_heavy()
+{
+       force_mb_all_readers();
+}
 #endif /* #else #ifdef RCU_MB */
 
 /*
@@ -206,7 +179,7 @@ static void force_mb_all_readers(void)
 static void wait_gp(void)
 {
        /* Read reader_gp before read futex */
-       force_mb_all_readers();
+       smp_mb_heavy();
        if (uatomic_read(&gp_futex) == -1)
                futex_async(&gp_futex, FUTEX_WAIT, -1,
                      NULL, NULL, 0);
@@ -228,7 +201,7 @@ void wait_for_quiescent_state(void)
                if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
                        uatomic_dec(&gp_futex);
                        /* Write futex before read reader_gp */
-                       force_mb_all_readers();
+                       smp_mb_heavy();
                }
 
                list_for_each_entry_safe(index, tmp, &registry, head) {
@@ -240,7 +213,7 @@ void wait_for_quiescent_state(void)
                if (list_empty(&registry)) {
                        if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
                                /* Read reader_gp before write futex */
-                               force_mb_all_readers();
+                               smp_mb_heavy();
                                uatomic_set(&gp_futex, 0);
                        }
                        break;
@@ -258,7 +231,7 @@ void wait_for_quiescent_state(void)
                if (list_empty(&registry)) {
                        if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
                                /* Read reader_gp before write futex */
-                               force_mb_all_readers();
+                               smp_mb_heavy();
                                uatomic_set(&gp_futex, 0);
                        }
                        break;
@@ -268,7 +241,7 @@ void wait_for_quiescent_state(void)
                                wait_gp();
                                break; /* only escape switch */
                        case KICK_READER_LOOPS:
-                               force_mb_all_readers();
+                               smp_mb_heavy();
                                wait_loops = 0;
                                break; /* only escape switch */
                        default:
@@ -289,7 +262,7 @@ void synchronize_rcu(void)
         * where new ptr points to. Must be done within internal_rcu_lock
         * because it iterates on reader threads.*/
        /* Write new ptr before changing the qparity */
-       force_mb_all_readers();
+       smp_mb_heavy();
 
        switch_next_rcu_qparity();      /* 0 -> 1 */
 
@@ -353,7 +326,7 @@ void synchronize_rcu(void)
        /* Finish waiting for reader threads before letting the old ptr being
         * freed. Must be done within internal_rcu_lock because it iterates on
         * reader threads. */
-       force_mb_all_readers();
+       smp_mb_heavy();
 
        internal_rcu_unlock();
 }
This page took 0.02762 seconds and 4 git commands to generate.