makefile update

diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index e077eac8f2b4b4bc7c44cadaf0f96d012be60fe3..f681c05bf0c05d96860e06cbb8771b11c9e3e829 100644
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
 #include "urcu-qsbr.h"
 
-pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 /*
  * Global grace period counter.
  */
-long urcu_gp_ctr = 0;
+unsigned long urcu_gp_ctr = RCU_GP_ONLINE;
 
 /*
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
  */
-long __thread rcu_reader_qs_gp;
+unsigned long __thread rcu_reader_qs_gp;
 
 /* Thread IDs of registered readers */
 #define INIT_NUM_THREADS 4
 
 struct reader_registry {
        pthread_t tid;
-       long *rcu_reader_qs_gp;
+       unsigned long *rcu_reader_qs_gp;
 };
 
 #ifdef DEBUG_YIELD
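
Note on the constants used in this diff: RCU_GP_ONLINE (the initializer above) and RCU_GP_CTR (added or xor-ed into urcu_gp_ctr in the hunks below) are defined in the urcu-qsbr headers, not here. A minimal sketch of a layout consistent with how they are used in this file follows; the exact values are an assumption, not taken from the headers:

/* Assumed layout, for reading this diff only -- the authoritative
 * definitions live in the urcu-qsbr headers. */
#define RCU_GP_ONLINE   (1UL << 0)   /* low bit: keeps urcu_gp_ctr non-zero */
#define RCU_GP_CTR      (1UL << 1)   /* grace-period increment (64-bit) or parity bit (32-bit) */

With such a layout urcu_gp_ctr is always non-zero, so a reader going offline can simply store 0 into rcu_reader_qs_gp, and any non-zero snapshot identifies an online reader.
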
@@ -99,18 +99,6 @@ static void internal_urcu_unlock(void)
        }
 }
 
-#ifdef HAS_INCOHERENT_CACHES
-static void force_mb_single_thread(struct reader_registry *index)
-{
-       smp_mb();
-}
-#endif /* #ifdef HAS_INCOHERENT_CACHES */
-
-static void force_mb_all_threads(void)
-{
-       smp_mb();
-}
-
 static void wait_for_quiescent_state(void)
 {
        struct reader_registry *index;
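
For context on the next hunk: wait_for_quiescent_state() busy-waits on rcu_gp_ongoing() for each registered reader, issuing smp_mb() per iteration on incoherent-cache architectures instead of the removed force_mb_single_thread() kick. The predicate itself is defined in the urcu-qsbr headers; a minimal sketch of what it is assumed to check, using the same LOAD_SHARED macro as this file:

/* Sketch only -- the real definition lives in the urcu-qsbr headers.
 * A reader still blocks the grace period if it is online (non-zero
 * snapshot) and its snapshot has not caught up with the global counter. */
static inline int rcu_gp_ongoing(unsigned long *value)
{
        unsigned long reader_gp = LOAD_SHARED(*value);

        return reader_gp && (reader_gp != LOAD_SHARED(urcu_gp_ctr));
}
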
@@ -125,47 +113,122 @@ static void wait_for_quiescent_state(void)
                while (rcu_gp_ongoing(index->rcu_reader_qs_gp))
                        cpu_relax();
 #else /* #ifndef HAS_INCOHERENT_CACHES */
-               int wait_loops = 0;
-               /*
-                * BUSY-LOOP. Force the reader thread to commit its
-                * rcu_reader_qs_gp update to memory if we wait for too long.
-                */
-               while (rcu_gp_ongoing(index->rcu_reader_qs_gp)) {
-                       if (wait_loops++ == KICK_READER_LOOPS) {
-                               force_mb_single_thread(index);
-                               wait_loops = 0;
-                       } else {
-                               cpu_relax();
-                       }
-               }
+               while (rcu_gp_ongoing(index->rcu_reader_qs_gp))
+                       smp_mb();
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
        }
 }
 
+/*
+ * Use a two-phase (parity) algorithm on architectures where long is smaller
+ * than 64 bits, so that grace-period counter overflow cannot cause a bug.
+ */
+
+#if (BITS_PER_LONG < 64)
+/*
+ * called with urcu_mutex held.
+ */
+static void switch_next_urcu_qparity(void)
+{
+       STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR);
+}
+
+void synchronize_rcu(void)
+{
+       unsigned long was_online;
+
+       was_online = rcu_reader_qs_gp;
+
+       /* All threads should read the qparity before accessing the data
+        * structure pointed to by the new pointer.
+        */
+       /* Write the new pointer before changing the qparity. */
+       smp_mb();
+
+       /*
+        * Mark the writer thread offline to make sure we don't wait for
+        * our own quiescent state. This allows using synchronize_rcu() in
+        * threads registered as readers.
+        */
+       if (was_online)
+               STORE_SHARED(rcu_reader_qs_gp, 0);
+
+       internal_urcu_lock();
+
+       switch_next_urcu_qparity();     /* 0 -> 1 */
+
+       /*
+        * Must commit qparity update to memory before waiting for parity
+        * 0 quiescent state. Failure to do so could result in the writer
+        * waiting forever while new readers are always accessing data (no
+        * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       /*
+        * Wait for previous parity to be empty of readers.
+        */
+       wait_for_quiescent_state();     /* Wait readers in parity 0 */
+
+       /*
+        * Must finish waiting for quiescent state for parity 0 before
+        * committing qparity update to memory. Failure to do so could result in
+        * the writer waiting forever while new readers are always accessing
+        * data (no progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       switch_next_urcu_qparity();     /* 1 -> 0 */
+
+       /*
+        * Must commit qparity update to memory before waiting for parity
+        * 1 quiescent state. Failure to do so could result in the writer
+        * waiting forever while new readers are always accessing data (no
+        * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       /*
+        * Wait for previous parity to be empty of readers.
+        */
+       wait_for_quiescent_state();     /* Wait readers in parity 1 */
+
+       internal_urcu_unlock();
+
+       /*
+        * Finish waiting for reader threads before letting the old ptr be
+        * freed.
+        */
+       if (was_online)
+               _STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
+       smp_mb();
+}
+#else /* !(BITS_PER_LONG < 64) */
 void synchronize_rcu(void)
 {
-       int was_online;
+       unsigned long was_online;
 
-       was_online = rcu_reader_qs_gp & 1;
+       was_online = rcu_reader_qs_gp;
 
        /*
         * Mark the writer thread offline to make sure we don't wait for
         * our own quiescent state. This allows using synchronize_rcu() in
         * threads registered as readers.
         */
+       smp_mb();
        if (was_online)
-               _rcu_thread_offline();
+               STORE_SHARED(rcu_reader_qs_gp, 0);
 
        internal_urcu_lock();
-       force_mb_all_threads();
-       STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr + 2);
+       STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr + RCU_GP_CTR);
        wait_for_quiescent_state();
-       force_mb_all_threads();
        internal_urcu_unlock();
 
        if (was_online)
-               _rcu_thread_online();
+               _STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
+       smp_mb();
 }
+#endif  /* !(BITS_PER_LONG < 64) */
 
 /*
  * library wrappers to be used by non-LGPL compatible source code.
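
Taken together, the writer-side paths above only make progress if reader threads periodically publish quiescent states and mark themselves offline around blocking calls. A hedged usage sketch of the reader side is shown below: rcu_quiescent_state(), rcu_thread_offline() and rcu_thread_online() correspond to wrappers this file exposes, while the registration call names, struct foo, shared_ptr, stop and need for a loop are hypothetical illustration only.

#include <stddef.h>
#include "urcu-qsbr.h"

/* Hypothetical example data -- not symbols from this repository. */
struct foo { int a; };
static struct foo *shared_ptr;
static volatile int stop;

static void *reader_thread(void *arg)
{
        (void) arg;
        rcu_register_thread();          /* registration call; name assumed */

        while (!stop) {
                struct foo *p;

                rcu_read_lock();        /* no-op under QSBR, documents intent */
                p = rcu_dereference(shared_ptr);
                if (p)
                        (void) p->a;    /* read-side access */
                rcu_read_unlock();

                /*
                 * Outside any read-side critical section: announce a
                 * quiescent state so that rcu_reader_qs_gp catches up with
                 * urcu_gp_ctr and synchronize_rcu() can return.
                 */
                rcu_quiescent_state();
        }

        /*
         * Around long blocking regions a reader would instead call
         * rcu_thread_offline() before blocking and rcu_thread_online()
         * afterwards, which is what the was_online handling in
         * synchronize_rcu() above relies on.
         */
        rcu_unregister_thread();        /* name assumed */
        return NULL;
}
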