Add ACCESS_ONCE to _STORE_SHARED
diff --git a/urcu.c b/urcu.c
index b04b121344c5e0d425c0d2cfaee34a662ecb8cbf..337f7640b44d208855277f6fc53d9322f212d638 100644
--- a/urcu.c
+++ b/urcu.c
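
For context: _STORE_SHARED() itself lives in urcu.h, so the macro change named in the commit title does not appear in this file's diff. The sketch below shows what these accessor primitives conventionally look like; the authoritative definitions are in urcu.h (where the public STORE_SHARED()/LOAD_SHARED() wrappers may also add cache-coherency primitives for non-coherent architectures), so treat the exact forms here as assumptions for illustration.

/* Force a single volatile access, defeating compiler tearing,
 * refetching and reordering of loads/stores to one variable. */
#define ACCESS_ONCE(x)		(*(volatile typeof(x) *)&(x))

/* Store to a variable shared with other threads; wrapping the store
 * in ACCESS_ONCE (the subject of this commit) makes it one volatile
 * write the compiler can neither split nor repeat. */
#define _STORE_SHARED(x, v)	({ ACCESS_ONCE(x) = (v); })

/* Load from a variable shared with other threads. */
#define LOAD_SHARED(p)		ACCESS_ONCE(p)
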
@@ -6,6 +6,8 @@
  * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
  *
  * Distributed under GPLv2
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
  */
 
 #include <stdio.h>
@@ -14,6 +16,8 @@
 #include <assert.h>
 #include <stdlib.h>
 #include <string.h>
+#include <errno.h>
+#include <poll.h>
 
 #include "urcu.h"
 
@@ -23,17 +27,23 @@ pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
  * Global grace period counter.
  * Contains the current RCU_GP_CTR_BIT.
  * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
+ * Written to only by writer with mutex taken. Read by both writer and readers.
  */
 long urcu_gp_ctr = RCU_GP_COUNT;
 
+/*
+ * Written to only by each individual reader. Read by both the reader and the
+ * writers.
+ */
 long __thread urcu_active_readers;
 
 /* Thread IDs of registered readers */
 #define INIT_NUM_THREADS 4
 
-struct reader_data {
+struct reader_registry {
        pthread_t tid;
        long *urcu_active_readers;
+       char *need_mb;
 };
 
 #ifdef DEBUG_YIELD
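
The comments added above document the ownership discipline: urcu_gp_ctr is written only by the writer under urcu_mutex, while each urcu_active_readers is written only by its owning reader. To see why keeping a count of 1 in the global counter accelerates the reader fast path, here is a hedged sketch of the assumed counter layout and the rcu_read_lock() step it serves; the authoritative definitions are in urcu.h, so the names and bit positions below are assumptions.

/* Assumed layout (per urcu.h): the low half of the long holds the
 * read-side nesting count, the bit above it the grace-period parity. */
#define RCU_GP_COUNT		(1UL << 0)
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

/* Reader fast path (sketch): because urcu_gp_ctr already carries a
 * count of RCU_GP_COUNT, an outermost rcu_read_lock() publishes the
 * current parity and a nesting count of 1 with one plain copy. */
static inline void rcu_read_lock_sketch(void)
{
	long tmp = urcu_active_readers;

	if (!(tmp & RCU_GP_CTR_NEST_MASK))	/* outermost */
		STORE_SHARED(urcu_active_readers, LOAD_SHARED(urcu_gp_ctr));
	else					/* nested: just count up */
		STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	/* A compiler barrier here is promoted to a real memory barrier
	 * by the signal machinery below (or by smp_mb() when
	 * DEBUG_FULL_MB is defined). */
}
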
@@ -41,20 +51,35 @@ unsigned int yield_active;
 unsigned int __thread rand_yield;
 #endif
 
-static struct reader_data *reader_data;
+static struct reader_registry *registry;
+static char __thread need_mb;
 static int num_readers, alloc_readers;
-#ifndef DEBUG_FULL_MB
-static int sig_done;
-#endif
 
 void internal_urcu_lock(void)
 {
        int ret;
+
+#ifndef DISTRUST_SIGNALS_EXTREME
        ret = pthread_mutex_lock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex lock");
                exit(-1);
        }
+#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
+       while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
+               if (ret != EBUSY && ret != EINTR) {
+                       printf("ret = %d, errno = %d\n", ret, errno);
+                       perror("Error in pthread mutex lock");
+                       exit(-1);
+               }
+               if (need_mb) {
+                       smp_mb();
+                       need_mb = 0;
+                       smp_mb();
+               }
+               poll(NULL,0,10);
+       }
+#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
 }
 
 void internal_urcu_unlock(void)
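
The DISTRUST_SIGNALS_EXTREME path above defends against a deadlock: a writer holding urcu_mutex busy-waits until this thread acknowledges a memory-barrier request, but this thread is itself blocked in pthread_mutex_lock() on the same mutex, and the SIGURCU that would have produced the acknowledgment was lost by a buggy kernel. A standalone sketch of the pattern, with hypothetical names and __sync_synchronize() standing in for this tree's smp_mb():

#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
static volatile char demo_need_mb;	/* set by a writer thread (not shown) */

static void demo_lock(void)
{
	int ret;

	/* Never block blindly: between trylock attempts, service any
	 * pending barrier request, so a writer that holds the mutex
	 * while spinning on our acknowledgment cannot deadlock us. */
	while ((ret = pthread_mutex_trylock(&demo_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			fprintf(stderr, "trylock: %d\n", ret);
			exit(1);
		}
		if (demo_need_mb) {
			__sync_synchronize();	/* order prior accesses */
			demo_need_mb = 0;	/* acknowledge */
			__sync_synchronize();	/* order the acknowledgment */
		}
		poll(NULL, 0, 10);	/* 10ms backoff */
	}
}

int main(void)
{
	demo_lock();
	pthread_mutex_unlock(&demo_mutex);
	return 0;
}
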
@@ -73,83 +98,104 @@ void internal_urcu_unlock(void)
  */
 static void switch_next_urcu_qparity(void)
 {
-       urcu_gp_ctr ^= RCU_GP_CTR_BIT;
+       STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
 }
 
 #ifdef DEBUG_FULL_MB
-static void force_mb_single_thread(pthread_t tid)
+#ifdef HAS_INCOHERENT_CACHES
+static void force_mb_single_thread(struct reader_registry *index)
 {
        smp_mb();
 }
+#endif /* #ifdef HAS_INCOHERENT_CACHES */
 
 static void force_mb_all_threads(void)
 {
        smp_mb();
 }
-#else
-
-static void force_mb_single_thread(pthread_t tid)
+#else /* #ifdef DEBUG_FULL_MB */
+#ifdef HAS_INCOHERENT_CACHES
+static void force_mb_single_thread(struct reader_registry *index)
 {
-       assert(reader_data);
-       sig_done = 0;
+       assert(registry);
        /*
         * pthread_kill has a smp_mb(). But beware, it does not
         * necessarily perform a cache flush on architectures with
         * non-coherent caches. Let's play it safe and not assume
         * anything: we use smp_mc() to make sure the cache flush is
         * enforced.
-        * smp_mb();    write sig_done before sending the signals
         */
-       smp_mc();       /* write sig_done before sending the signals */
-       pthread_kill(tid, SIGURCU);
+       *index->need_mb = 1;
+       smp_mc();       /* write ->need_mb before sending the signals */
+       pthread_kill(index->tid, SIGURCU);
+       smp_mb();
        /*
         * Wait for sighandler (and thus mb()) to execute on every thread.
         * BUSY-LOOP.
         */
-       while (LOAD_REMOTE(sig_done) < 1)
-               cpu_relax();
-       smp_mb();       /* read sig_done before ending the barrier */
+       while (*index->need_mb) {
+               poll(NULL, 0, 1);
+       }
+       smp_mb();       /* read ->need_mb before ending the barrier */
 }
+#endif /* #ifdef HAS_INCOHERENT_CACHES */
 
 static void force_mb_all_threads(void)
 {
-       struct reader_data *index;
+       struct reader_registry *index;
        /*
         * Ask each thread to execute a smp_mb(), so that we can consider
         * the compiler barriers around rcu read lock as real memory
         * barriers.
         */
-       if (!reader_data)
+       if (!registry)
                return;
-       sig_done = 0;
        /*
         * pthread_kill has a smp_mb(). But beware, it does not
         * necessarily perform a cache flush on architectures with
         * non-coherent caches. Let's play it safe and not assume
         * anything: we use smp_mc() to make sure the cache flush is
         * enforced.
-        * smp_mb();    write sig_done before sending the signals
         */
-       smp_mc();       /* write sig_done before sending the signals */
-       for (index = reader_data; index < reader_data + num_readers; index++)
+       for (index = registry; index < registry + num_readers; index++) {
+               *index->need_mb = 1;
+               smp_mc();       /* write need_mb before sending the signal */
                pthread_kill(index->tid, SIGURCU);
+       }
        /*
         * Wait for sighandler (and thus mb()) to execute on every thread.
-        * BUSY-LOOP.
+        *
+        * Note that the pthread_kill() will never be executed on systems
+        * that correctly deliver signals in a timely manner.  However, it
+        * is not uncommon for kernels to have bugs that can result in
+        * lost or unduly delayed signals.
+        *
+        * If you are seeing the below pthread_kill() executing much at
+        * all, we suggest testing the underlying kernel and filing the
+        * relevant bug report.  For Linux kernels, we recommend getting
+        * the Linux Test Project (LTP).
         */
-       while (LOAD_REMOTE(sig_done) < num_readers)
-               cpu_relax();
-       smp_mb();       /* read sig_done before ending the barrier */
+       for (index = registry; index < registry + num_readers; index++) {
+               while (*index->need_mb) {
+                       pthread_kill(index->tid, SIGURCU);
+                       poll(NULL, 0, 1);
+               }
+       }
+       smp_mb();       /* read ->need_mb before ending the barrier */
 }
-#endif
+#endif /* #else #ifdef DEBUG_FULL_MB */
 
 void wait_for_quiescent_state(void)
 {
-       struct reader_data *index;
+       struct reader_registry *index;
 
-       if (!reader_data)
+       if (!registry)
                return;
        /*
         * Wait for each thread urcu_active_readers count to become 0.
         */
-       for (index = reader_data; index < reader_data + num_readers; index++) {
+       for (index = registry; index < registry + num_readers; index++) {
+#ifndef HAS_INCOHERENT_CACHES
+               while (rcu_old_gp_ongoing(index->urcu_active_readers))
+                       cpu_relax();
+#else /* #ifndef HAS_INCOHERENT_CACHES */
                int wait_loops = 0;
                /*
                 * BUSY-LOOP. Force the reader thread to commit its
@@ -157,12 +203,13 @@ void wait_for_quiescent_state(void)
                 */
                while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
                        if (wait_loops++ == KICK_READER_LOOPS) {
-                               force_mb_single_thread(index->tid);
+                               force_mb_single_thread(index);
                                wait_loops = 0;
                        } else {
                                cpu_relax();
                        }
                }
+#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
        }
 }
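
wait_for_quiescent_state() polls each registered reader through rcu_old_gp_ongoing(), whose definition sits outside the hunks shown. A hedged sketch of the predicate, reusing the counter layout assumed earlier: a reader blocks the grace period only while it is inside a read-side critical section and its snapshot's parity bit differs from the current urcu_gp_ctr, i.e. it started before the flip.

static inline int rcu_old_gp_ongoing_sketch(long *value)
{
	long v;

	if (value == NULL)
		return 0;	/* reader no longer registered */
	v = LOAD_SHARED(*value);
	/* Nonzero nesting count AND a parity bit differing from the
	 * current global phase => still reading under the old parity. */
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ LOAD_SHARED(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
}
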
 
@@ -183,8 +230,8 @@ void synchronize_rcu(void)
         * 0 quiescent state. Failure to do so could result in the writer
         * waiting forever while new readers are always accessing data (no
         * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
         */
-       smp_mc();
 
        /*
         * Wait for previous parity to be empty of readers.
@@ -196,8 +243,8 @@ void synchronize_rcu(void)
         * committing qparity update to memory. Failure to do so could result in
         * the writer waiting forever while new readers are always accessing
         * data (no progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
         */
-       smp_mc();
 
        switch_next_urcu_qparity();     /* 1 -> 0 */
 
@@ -206,8 +253,8 @@ void synchronize_rcu(void)
         * 1 quiescent state. Failure to do so could result in the writer
         * waiting forever while new readers are always accessing data (no
         * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
         */
-       smp_mc();
 
        /*
         * Wait for previous parity to be empty of readers.
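
The three synchronize_rcu() hunks above drop explicit smp_mc() calls because STORE_SHARED() and LOAD_SHARED() now enforce cache coherency at each access to urcu_gp_ctr and the reader counters. For orientation, the surrounding structure of synchronize_rcu() in this file is roughly the following sketch (details elided): the parity is flipped and drained twice, so a reader that sampled urcu_gp_ctr just before the first flip can never be mistaken for a reader of the new grace period.

void synchronize_rcu_sketch(void)
{
	internal_urcu_lock();

	force_mb_all_threads();		/* order prior updates before flip */

	switch_next_urcu_qparity();	/* 0 -> 1 */
	wait_for_quiescent_state();	/* drain readers of parity 0 */

	switch_next_urcu_qparity();	/* 1 -> 0 */
	wait_for_quiescent_state();	/* drain readers of parity 1 */

	force_mb_all_threads();		/* order draining before reclamation */

	internal_urcu_unlock();
}
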
@@ -224,26 +271,27 @@ void synchronize_rcu(void)
 
 void urcu_add_reader(pthread_t id)
 {
-       struct reader_data *oldarray;
+       struct reader_registry *oldarray;
 
-       if (!reader_data) {
+       if (!registry) {
                alloc_readers = INIT_NUM_THREADS;
                num_readers = 0;
-               reader_data =
-                       malloc(sizeof(struct reader_data) * alloc_readers);
+               registry =
+                       malloc(sizeof(struct reader_registry) * alloc_readers);
        }
        if (alloc_readers < num_readers + 1) {
-               oldarray = reader_data;
-               reader_data = malloc(sizeof(struct reader_data)
+               oldarray = registry;
+               registry = malloc(sizeof(struct reader_registry)
                                * (alloc_readers << 1));
-               memcpy(reader_data, oldarray,
-                       sizeof(struct reader_data) * alloc_readers);
+               memcpy(registry, oldarray,
+                       sizeof(struct reader_registry) * alloc_readers);
                alloc_readers <<= 1;
                free(oldarray);
        }
-       reader_data[num_readers].tid = id;
+       registry[num_readers].tid = id;
        /* reference to the TLS of _this_ reader thread. */
-       reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
+       registry[num_readers].urcu_active_readers = &urcu_active_readers;
+       registry[num_readers].need_mb = &need_mb;
        num_readers++;
 }
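
urcu_add_reader() assumes urcu_mutex is held; in this tree it is reached through the thread-registration entry point, roughly as follows (a sketch of the wrapper, which this diff does not show):

void urcu_register_thread_sketch(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}
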
 
@@ -253,15 +301,15 @@ void urcu_add_reader(pthread_t id)
  */
 void urcu_remove_reader(pthread_t id)
 {
-       struct reader_data *index;
+       struct reader_registry *index;
 
-       assert(reader_data != NULL);
-       for (index = reader_data; index < reader_data + num_readers; index++) {
+       assert(registry != NULL);
+       for (index = registry; index < registry + num_readers; index++) {
                if (pthread_equal(index->tid, id)) {
-                       memcpy(index, &reader_data[num_readers - 1],
-                               sizeof(struct reader_data));
-                       reader_data[num_readers - 1].tid = 0;
-                       reader_data[num_readers - 1].urcu_active_readers = NULL;
+                       memcpy(index, &registry[num_readers - 1],
+                               sizeof(struct reader_registry));
+                       registry[num_readers - 1].tid = 0;
+                       registry[num_readers - 1].urcu_active_readers = NULL;
                        num_readers--;
                        return;
                }
@@ -293,7 +341,8 @@ void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
         * executed on.
         */
        smp_mb();
-       atomic_inc(&sig_done);
+       need_mb = 0;
+       smp_mb();
 }
 
 void __attribute__((constructor)) urcu_init(void)
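
The handler rewrite above replaces the shared atomic_inc(&sig_done) counter with a per-thread acknowledgment. Here is the same handler as a sketch with expanded comments on why the store is bracketed by two barriers (smp_mb() as provided by this tree's arch headers):

static void sigurcu_handler_sketch(int signo, siginfo_t *siginfo,
				   void *context)
{
	/* Execute the memory barrier the writer requested: order every
	 * access this reader made before the signal was delivered. */
	smp_mb();
	/* Acknowledge; the writer busy-waits on this thread's flag. */
	need_mb = 0;
	/* Keep the acknowledgment from being reordered before the
	 * barrier above, so the writer can never observe need_mb == 0
	 * while this reader's earlier accesses are still in flight. */
	smp_mb();
}
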
@@ -320,6 +369,6 @@ void __attribute__((destructor)) urcu_exit(void)
                exit(-1);
        }
        assert(act.sa_sigaction == sigurcu_handler);
-       free(reader_data);
+       free(registry);
 }
-#endif
+#endif /* #ifndef DEBUG_FULL_MB */
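
To close, an end-to-end usage sketch of the API this file implements. The function names follow this era's urcu.h (urcu_register_thread(), rcu_read_lock(), rcu_dereference(), rcu_assign_pointer(), synchronize_rcu()); treat them as assumptions, since the diff above shows only the library side.

#include <pthread.h>
#include <stdlib.h>
#include "urcu.h"

struct config { int value; };
static struct config *shared_cfg;	/* RCU-protected pointer */

static void *reader(void *arg)
{
	struct config *cfg;

	urcu_register_thread();		/* join the reader registry */
	rcu_read_lock();
	cfg = rcu_dereference(shared_cfg);
	if (cfg)
		(void)cfg->value;	/* valid until rcu_read_unlock() */
	rcu_read_unlock();
	urcu_unregister_thread();
	return NULL;
}

static void update(int v)
{
	struct config *newp = malloc(sizeof(*newp)), *oldp;

	newp->value = v;
	oldp = shared_cfg;
	rcu_assign_pointer(shared_cfg, newp);
	synchronize_rcu();	/* wait out all pre-existing readers */
	free(oldp);		/* no reader can still reference oldp */
}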