qsbr: only mark reader thread as being waited for in contended case
[userspace-rcu.git] / urcu-qsbr.c
index b42d7c4a42e3a4aa4e334ca457066327e969a85a..9b3210a6fde03412f401a68c54c5b073bdc357cd 100644 (file)
@@ -31,6 +31,7 @@
 #include <string.h>
 #include <errno.h>
 #include <poll.h>
+#include <unistd.h>
 
 #define BUILD_QSBR_LIB
 #include "urcu-qsbr-static.h"
@@ -48,14 +49,14 @@ unsigned long urcu_gp_ctr = RCU_GP_ONLINE;
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
  */
-unsigned long __thread rcu_reader_qs_gp;
+struct urcu_reader_status __thread urcu_reader_status;
 
 /* Thread IDs of registered readers */
 #define INIT_NUM_THREADS 4
 
 struct reader_registry {
        pthread_t tid;
-       unsigned long *rcu_reader_qs_gp;
+       struct urcu_reader_status *urcu_reader_status;
 };
 
 #ifdef DEBUG_YIELD
@@ -109,14 +110,20 @@ static void wait_for_quiescent_state(void)
        if (!registry)
                return;
        /*
-        * Wait for each thread rcu_reader_qs_gp count to become 0.
+        * Wait for each thread's urcu_reader_status qs_gp count to become 0.
         */
        for (index = registry; index < registry + num_readers; index++) {
                int wait_loops = 0;
 
-               while (rcu_gp_ongoing(index->rcu_reader_qs_gp)) {
+               if (likely(!rcu_gp_ongoing(&index->urcu_reader_status->qs_gp)))
+                       continue;
+
+               index->urcu_reader_status->gp_waiting = 1;
+               while (rcu_gp_ongoing(&index->urcu_reader_status->qs_gp)) {
                        if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
-                               sched_yield();  /* ideally sched_yield_to() */
+                               /* adaptive wait time, in us */
+                               usleep(LOAD_SHARED(index->urcu_reader_status->qs_time_delta_usec) / 4);
+                               wait_loops = 0;
                        } else {
 #ifndef HAS_INCOHERENT_CACHES
                                cpu_relax();
@@ -125,6 +132,7 @@ static void wait_for_quiescent_state(void)
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
                        }
                }
+               index->urcu_reader_status->gp_waiting = 0;
        }
 }
 
@@ -146,7 +154,7 @@ void synchronize_rcu(void)
 {
        unsigned long was_online;
 
-       was_online = rcu_reader_qs_gp;
+       was_online = urcu_reader_status.qs_gp;
 
        /* All threads should read qparity before accessing data structure
         * where new ptr points to.
@@ -160,7 +168,7 @@ void synchronize_rcu(void)
         * threads registered as readers.
         */
        if (was_online)
-               STORE_SHARED(rcu_reader_qs_gp, 0);
+               STORE_SHARED(urcu_reader_status.qs_gp, 0);
 
        internal_urcu_lock();
 
@@ -213,7 +221,8 @@ void synchronize_rcu(void)
         * freed.
         */
        if (was_online)
-               _STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
+               _STORE_SHARED(urcu_reader_status.qs_gp,
+                             LOAD_SHARED(urcu_gp_ctr));
        smp_mb();
 }
 #else /* !(BITS_PER_LONG < 64) */
@@ -221,7 +230,7 @@ void synchronize_rcu(void)
 {
        unsigned long was_online;
 
-       was_online = rcu_reader_qs_gp;
+       was_online = urcu_reader_status.qs_gp;
 
        /*
         * Mark the writer thread offline to make sure we don't wait for
@@ -230,7 +239,7 @@ void synchronize_rcu(void)
         */
        smp_mb();
        if (was_online)
-               STORE_SHARED(rcu_reader_qs_gp, 0);
+               STORE_SHARED(urcu_reader_status.qs_gp, 0);
 
        internal_urcu_lock();
        STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);
@@ -240,7 +249,8 @@ void synchronize_rcu(void)
        internal_urcu_unlock();
 
        if (was_online)
-               _STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
+               _STORE_SHARED(urcu_reader_status.qs_gp,
+                             LOAD_SHARED(urcu_gp_ctr));
        smp_mb();
 }
 #endif  /* !(BITS_PER_LONG < 64) */
@@ -327,7 +337,7 @@ static void rcu_add_reader(pthread_t id)
        }
        registry[num_readers].tid = id;
        /* reference to the TLS of _this_ reader thread. */
-       registry[num_readers].rcu_reader_qs_gp = &rcu_reader_qs_gp;
+       registry[num_readers].urcu_reader_status = &urcu_reader_status;
        num_readers++;
 }
 
@@ -345,7 +355,7 @@ static void rcu_remove_reader(pthread_t id)
                        memcpy(index, &registry[num_readers - 1],
                                sizeof(struct reader_registry));
                        registry[num_readers - 1].tid = 0;
-                       registry[num_readers - 1].rcu_reader_qs_gp = NULL;
+                       registry[num_readers - 1].urcu_reader_status = NULL;
                        num_readers--;
                        return;
                }
This page took 0.034706 seconds and 4 git commands to generate.