urcu (mb/signal): list move
diff --git a/urcu.c b/urcu.c
index d4a06844885cde3da2d03092e3e9c45a939e0438..e70980f5b93e259f5506e3fb7e1b602708aa63b3 100644
--- a/urcu.c
+++ b/urcu.c
@@ -3,9 +3,24 @@
  *
  * Userspace RCU library
  *
- * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
  *
- * Distributed under GPLv2
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
  */
 
 #include <stdio.h>
 #include <assert.h>
 #include <stdlib.h>
 #include <string.h>
+#include <errno.h>
+#include <poll.h>
 
+#include "urcu-static.h"
+/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
 #include "urcu.h"
 
-pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+#ifndef URCU_MB
+static int init_done;
+
+void __attribute__((constructor)) urcu_init(void);
+void __attribute__((destructor)) urcu_exit(void);
+#else
+void urcu_init(void)
+{
+}
+#endif
+
+static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+int gp_futex;
 
 /*
  * Global grace period counter.
  * Contains the current RCU_GP_CTR_BIT.
- * Also has a RCU_GP_CTR_BIT of 1, to accelerate the reader fast path.
+ * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
+ * Written to only by the writer, with the mutex held. Read by both writer and readers.
  */
 long urcu_gp_ctr = RCU_GP_COUNT;
 
-long __thread urcu_active_readers;
-
-/* Thread IDs of registered readers */
-#define INIT_NUM_THREADS 4
-
-struct reader_data {
-       pthread_t tid;
-       long *urcu_active_readers;
-};
+/*
+ * Written to only by each individual reader. Read by both the reader and the
+ * writers.
+ */
+struct urcu_reader __thread urcu_reader;
 
 #ifdef DEBUG_YIELD
 unsigned int yield_active;
 unsigned int __thread rand_yield;
 #endif
 
-static struct reader_data *reader_data;
-static int num_readers, alloc_readers;
-static int sig_done;
+static LIST_HEAD(registry);
 
-void internal_urcu_lock(void)
+static void internal_urcu_lock(void)
 {
        int ret;
+
+#ifndef DISTRUST_SIGNALS_EXTREME
        ret = pthread_mutex_lock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex lock");
                exit(-1);
        }
+#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
+       while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
+               if (ret != EBUSY && ret != EINTR) {
+                       printf("ret = %d, errno = %d\n", ret, errno);
+                       perror("Error in pthread mutex lock");
+                       exit(-1);
+               }
+               if (urcu_reader.need_mb) {
+                       smp_mb();
+                       urcu_reader.need_mb = 0;
+                       smp_mb();
+               }
+               poll(NULL, 0, 10);
+       }
+#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
 }
 
-void internal_urcu_unlock(void)
+static void internal_urcu_unlock(void)
 {
        int ret;
 
@@ -71,164 +115,347 @@ void internal_urcu_unlock(void)
  */
 static void switch_next_urcu_qparity(void)
 {
-       urcu_gp_ctr ^= RCU_GP_CTR_BIT;
+       STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
+}
+
+#ifdef URCU_MB
+#if 0 /* unused */
+static void force_mb_single_thread(struct urcu_reader *index)
+{
+       smp_mb();
 }
+#endif /* 0 */
 
 static void force_mb_all_threads(void)
 {
-       struct reader_data *index;
+       smp_mb();
+}
+#else /* #ifdef URCU_MB */
+#if 0 /* unused */
+static void force_mb_single_thread(struct urcu_reader *index)
+{
+       assert(!list_empty(&registry));
        /*
-        * Ask for each threads to execute a mb() so we can consider the
+        * pthread_kill has an smp_mb(). But beware: we cannot assume it
+        * performs a cache flush on architectures with non-coherent caches.
+        * Let's play it safe and not assume anything: we use smp_mc() to
+        * make sure the cache flush is enforced.
+        */
+       index->need_mb = 1;
+       smp_mc();       /* write ->need_mb before sending the signals */
+       pthread_kill(index->tid, SIGURCU);
+       smp_mb();
+       /*
+        * Wait for sighandler (and thus mb()) to execute on the target thread.
+        * BUSY-LOOP.
+        */
+       while (index->need_mb) {
+               poll(NULL, 0, 1);
+       }
+       smp_mb();       /* read ->need_mb before ending the barrier */
+}
+#endif /* 0 */
+
+static void force_mb_all_threads(void)
+{
+       struct urcu_reader *index;
+
+       /*
+        * Ask each thread to execute an smp_mb() so we can consider the
         * compiler barriers around rcu read lock as real memory barriers.
         */
-       if (!reader_data)
+       if (list_empty(&registry))
                return;
-       debug_yield_write();
-       sig_done = 0;
-       debug_yield_write();
-       mb();   /* write sig_done before sending the signals */
-       debug_yield_write();
-       for (index = reader_data; index < reader_data + num_readers; index++) {
+       /*
+        * pthread_kill has an smp_mb(). But beware: we cannot assume it
+        * performs a cache flush on architectures with non-coherent caches.
+        * Let's play it safe and not assume anything: we use smp_mc() to
+        * make sure the cache flush is enforced.
+        */
+       list_for_each_entry(index, &registry, head) {
+               index->need_mb = 1;
+               smp_mc();       /* write need_mb before sending the signal */
                pthread_kill(index->tid, SIGURCU);
-               debug_yield_write();
        }
        /*
         * Wait for sighandler (and thus mb()) to execute on every thread.
-        * BUSY-LOOP.
+        *
+        * Note that the pthread_kill() in the retry loop below should
+        * never execute on systems that correctly deliver signals in a
+        * timely manner.  However, it is not uncommon for kernels to have
+        * bugs that can result in lost or unduly delayed signals.
+        *
+        * If you see the pthread_kill() below executing at all frequently,
+        * we suggest testing the underlying kernel and filing the relevant
+        * bug report.  For Linux kernels, we recommend the Linux Test
+        * Project (LTP).
         */
-       while (sig_done < num_readers)
-               barrier();
-       debug_yield_write();
-       mb();   /* read sig_done before ending the barrier */
-       debug_yield_write();
+       list_for_each_entry(index, &registry, head) {
+               while (index->need_mb) {
+                       pthread_kill(index->tid, SIGURCU);
+                       poll(NULL, 0, 1);
+               }
+       }
+       smp_mb();       /* read ->need_mb before ending the barrier */
+}
+#endif /* #else #ifdef URCU_MB */
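A note on SIGURCU: it is only a build-time alias for the signal used to broadcast memory barriers to registered readers. Unless overridden at compile time, urcu.h defines it along the lines of:

        #define SIGURCU SIGUSR1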
+
+/*
+ * synchronize_rcu() waiting. Single thread.
+ */
+static void wait_gp(void)
+{
+       /* Read reader_gp before read futex */
+       force_mb_all_threads();
+       if (uatomic_read(&gp_futex) == -1)
+               futex(&gp_futex, FUTEX_WAIT, -1,
+                     NULL, NULL, 0);
 }
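The gp_futex protocol in a nutshell: 0 means no waiter; the writer decrements it to -1 (in wait_for_quiescent_state() below) before blocking here, and a reader leaving its read-side critical section resets it and wakes the writer. A minimal sketch of that reader-side counterpart, assuming the wake-up helper lives in urcu-static.h as in this tree:

        static inline void wake_up_gp(void)
        {
                if (uatomic_read(&gp_futex) == -1) {
                        uatomic_set(&gp_futex, 0);
                        /* Wake the lone writer blocked in wait_gp(). */
                        futex(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
                }
        }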
 
 void wait_for_quiescent_state(void)
 {
-       struct reader_data *index;
+       LIST_HEAD(qsreaders);
+       int wait_loops = 0;
+       struct urcu_reader *index;
 
-       if (!reader_data)
+       if (list_empty(&registry))
                return;
-       /* Wait for each thread urcu_active_readers count to become 0.
+       /*
+        * Wait for each thread urcu_reader.ctr count to become 0.
         */
-       for (index = reader_data; index < reader_data + num_readers; index++) {
+       for (;;) {
+               wait_loops++;
+               if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+                       uatomic_dec(&gp_futex);
+                       /* Write futex before read reader_gp */
+                       force_mb_all_threads();
+               }
+
+               list_for_each_entry(index, &registry, head) {
+                       if (!rcu_old_gp_ongoing(&index->ctr))
+                               list_move(&index->head, &qsreaders);
+               }
+
+#ifndef HAS_INCOHERENT_CACHES
+               if (list_empty(&registry)) {
+                       if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+                               /* Read reader_gp before write futex */
+                               force_mb_all_threads();
+                               uatomic_set(&gp_futex, 0);
+                       }
+                       break;
+               } else {
+                       if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
+                               wait_gp();
+                       else
+                               cpu_relax();
+               }
+#else /* #ifndef HAS_INCOHERENT_CACHES */
                /*
-                * BUSY-LOOP.
+                * BUSY-LOOP. Force the reader thread to commit its
+                * urcu_reader.ctr update to memory if we wait for too long.
                 */
-               while (rcu_old_gp_ongoing(index->urcu_active_readers))
-                       barrier();
+               if (list_empty(&registry)) {
+                       if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+                               /* Read reader_gp before write futex */
+                               force_mb_all_threads();
+                               uatomic_set(&gp_futex, 0);
+                       }
+                       break;
+               } else {
+                       switch (wait_loops) {
+                       case RCU_QS_ACTIVE_ATTEMPTS:
+                               wait_gp();
+                               break; /* only escape switch */
+                       case KICK_READER_LOOPS:
+                               force_mb_all_threads();
+                               wait_loops = 0;
+                               break; /* only escape switch */
+                       default:
+                               cpu_relax();
+                       }
+               }
+#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
        }
-       /*
-        * Locally : read *index->urcu_active_readers before freeing old
-        * pointer.
-        * Remote (reader threads) : Order urcu_qparity update and other
-        * thread's quiescent state counter read.
-        */
-       force_mb_all_threads();
+       /* put back the reader list in the registry */
+       list_move(&qsreaders, &registry);
 }
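For reference, rcu_old_gp_ongoing() is what decides whether a reader can be moved off the registry; it lives in urcu-static.h. A sketch of its logic (not the verbatim upstream code), assuming the RCU_GP_COUNT/RCU_GP_CTR_BIT/RCU_GP_CTR_NEST_MASK encoding used here: a reader still blocks the grace period only if its nesting count is non-zero and its snapshot of the parity bit differs from the current global parity.

        static inline int rcu_old_gp_ongoing(long *ctr)
        {
                long v;

                /* Read once so the nesting and parity tests see the same value. */
                v = LOAD_SHARED(*ctr);
                return (v & RCU_GP_CTR_NEST_MASK) &&
                        ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
        }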
 
-static void switch_qparity(void)
+void synchronize_rcu(void)
 {
-       /* All threads should read qparity before accessing data structure. */
-       /* Write ptr before changing the qparity */
+       internal_urcu_lock();
+
+       /* All threads should read qparity before accessing the data
+        * structure pointed to by the new ptr. Must be done within
+        * internal_urcu_lock because it iterates on reader threads. */
+       /* Write new ptr before changing the qparity */
        force_mb_all_threads();
-       debug_yield_write();
-       switch_next_urcu_qparity();
-       debug_yield_write();
+
+       switch_next_urcu_qparity();     /* 0 -> 1 */
+
+       /*
+        * Must commit qparity update to memory before waiting for parity
+        * 0 quiescent state. Failure to do so could result in the writer
+        * waiting forever while new readers are always accessing data (no
+        * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       /*
+        * Adding an smp_mb() which is _not_ formally required, but makes the
+        * model easier to understand. It does not have a big performance impact
+        * anyway, given this is the write-side.
+        */
+       smp_mb();
+
+       /*
+        * Wait for previous parity to be empty of readers.
+        */
+       wait_for_quiescent_state();     /* Wait readers in parity 0 */
+
+       /*
+        * Must finish waiting for quiescent state for parity 0 before
+        * committing qparity update to memory. Failure to do so could result in
+        * the writer waiting forever while new readers are always accessing
+        * data (no progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       /*
+        * Adding an smp_mb() which is _not_ formally required, but makes the
+        * model easier to understand. It does not have a big performance impact
+        * anyway, given this is the write-side.
+        */
+       smp_mb();
+
+       switch_next_urcu_qparity();     /* 1 -> 0 */
+
+       /*
+        * Must commit qparity update to memory before waiting for parity
+        * 1 quiescent state. Failure to do so could result in the writer
+        * waiting forever while new readers are always accessing data (no
+        * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       /*
+        * Adding an smp_mb() which is _not_ formally required, but makes the
+        * model easier to understand. It does not have a big performance impact
+        * anyway, given this is the write-side.
+        */
+       smp_mb();
 
        /*
         * Wait for previous parity to be empty of readers.
         */
-       wait_for_quiescent_state();
+       wait_for_quiescent_state();     /* Wait readers in parity 1 */
+
+       /* Finish waiting for reader threads before letting the old ptr be
+        * freed. Must be done within internal_urcu_lock because it iterates on
+        * reader threads. */
+       force_mb_all_threads();
+
+       internal_urcu_unlock();
 }
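A writer-side usage sketch (struct foo, gp and update_foo() are illustrative, not part of this file): publish the new version, wait for a grace period, then reclaim the old one.

        struct foo {
                int a;
        };
        struct foo *gp;                 /* RCU-protected global pointer */

        void update_foo(int a)
        {
                struct foo *new_foo, *old_foo;

                new_foo = malloc(sizeof(*new_foo));
                new_foo->a = a;
                old_foo = rcu_xchg_pointer_sym((void **)&gp, new_foo);
                synchronize_rcu();      /* wait for pre-existing readers */
                free(old_foo);          /* no reader can still hold old_foo */
        }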
 
-void synchronize_rcu(void)
+/*
+ * library wrappers to be used by non-LGPL compatible source code.
+ */
+
+void rcu_read_lock(void)
 {
-       debug_yield_write();
-       internal_urcu_lock();
-       debug_yield_write();
-       switch_qparity();
-       debug_yield_write();
-       switch_qparity();
-       debug_yield_write();
-       internal_urcu_unlock();
-       debug_yield_write();
+       _rcu_read_lock();
 }
 
-void urcu_add_reader(pthread_t id)
+void rcu_read_unlock(void)
 {
-       struct reader_data *oldarray;
+       _rcu_read_unlock();
+}
 
-       if (!reader_data) {
-               alloc_readers = INIT_NUM_THREADS;
-               num_readers = 0;
-               reader_data =
-                       malloc(sizeof(struct reader_data) * alloc_readers);
-       }
-       if (alloc_readers < num_readers + 1) {
-               oldarray = reader_data;
-               reader_data = malloc(sizeof(struct reader_data)
-                               * (alloc_readers << 1));
-               memcpy(reader_data, oldarray,
-                       sizeof(struct reader_data) * alloc_readers);
-               alloc_readers <<= 1;
-               free(oldarray);
-       }
-       reader_data[num_readers].tid = id;
-       /* reference to the TLS of _this_ reader thread. */
-       reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
-       num_readers++;
+void *rcu_dereference(void *p)
+{
+       return _rcu_dereference(p);
 }
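The reader-side counterpart to the sketch above (same illustrative gp; the calling thread must have called rcu_register_thread() beforehand):

        int read_foo(void)
        {
                struct foo *p;
                int a = -1;

                rcu_read_lock();
                p = rcu_dereference(gp);
                if (p)
                        a = p->a;
                rcu_read_unlock();
                return a;
        }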
 
-/*
- * Never shrink (implementation limitation).
- * This is O(nb threads). Eventually use a hash table.
- */
-void urcu_remove_reader(pthread_t id)
-{
-       struct reader_data *index;
-
-       assert(reader_data != NULL);
-       for (index = reader_data; index < reader_data + num_readers; index++) {
-               if (pthread_equal(index->tid, id)) {
-                       memcpy(index, &reader_data[num_readers - 1],
-                               sizeof(struct reader_data));
-                       reader_data[num_readers - 1].tid = 0;
-                       reader_data[num_readers - 1].urcu_active_readers = NULL;
-                       num_readers--;
-                       return;
-               }
-       }
-       /* Hrm not found, forgot to register ? */
-       assert(0);
+void *rcu_assign_pointer_sym(void **p, void *v)
+{
+       wmb();
+       return STORE_SHARED(p, v);
+}
+
+void *rcu_xchg_pointer_sym(void **p, void *v)
+{
+       wmb();
+       return uatomic_xchg(p, v);
 }
 
-void urcu_register_thread(void)
+void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
 {
+       wmb();
+       return uatomic_cmpxchg(p, old, _new);
+}
+
+void *rcu_publish_content_sym(void **p, void *v)
+{
+       void *oldptr;
+
+       oldptr = _rcu_xchg_pointer(p, v);
+       synchronize_rcu();
+       return oldptr;
+}
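rcu_publish_content_sym() folds the pointer exchange and the grace period into one call, so the returned old pointer may be reclaimed immediately; with the illustrative gp and new_foo above:

        old_foo = rcu_publish_content_sym((void **)&gp, new_foo);
        free(old_foo);          /* safe: a grace period has already elapsed */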
+
+void rcu_register_thread(void)
+{
+       urcu_reader.tid = pthread_self();
+       assert(urcu_reader.need_mb == 0);
+       assert(urcu_reader.ctr == 0);
+
        internal_urcu_lock();
-       urcu_add_reader(pthread_self());
+       urcu_init();    /* In case gcc does not support constructor attribute */
+       list_add(&urcu_reader.head, &registry);
        internal_urcu_unlock();
 }
 
-void urcu_unregister_thread(void)
+void rcu_unregister_thread(void)
 {
        internal_urcu_lock();
-       urcu_remove_reader(pthread_self());
+       list_del(&urcu_reader.head);
        internal_urcu_unlock();
 }
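A reader thread lifecycle sketch (reader_thread() and should_stop() are hypothetical): register before the first read-side critical section, unregister before the thread exits.

        void *reader_thread(void *arg)
        {
                rcu_register_thread();
                while (!should_stop()) {
                        rcu_read_lock();
                        /* ... access RCU-protected data via rcu_dereference() ... */
                        rcu_read_unlock();
                }
                rcu_unregister_thread();
                return NULL;
        }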
 
-void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
+#ifndef URCU_MB
+static void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
 {
-       mb();
-       atomic_inc(&sig_done);
+       /*
+        * Executing this smp_mb() is the only purpose of this signal handler.
+        * At that precise moment, it promotes the reader's compiler-only
+        * barrier() into a real smp_mb() on every thread it is executed on.
+        */
+       smp_mb();
+       urcu_reader.need_mb = 0;
+       smp_mb();
 }
 
-void __attribute__((constructor)) urcu_init(void)
+/*
+ * urcu_init constructor. Called when the library is loaded, but also when
+ * reader threads call rcu_register_thread().
+ * Should only be called by a single thread at a given time. This is ensured
+ * by holding the internal_urcu_lock() from rcu_register_thread(), or by
+ * running at library load time, which is not executed by multiple threads
+ * nor concurrently with rcu_register_thread() anyway.
+ */
+void urcu_init(void)
 {
        struct sigaction act;
        int ret;
 
+       if (init_done)
+               return;
+       init_done = 1;
+
        act.sa_sigaction = sigurcu_handler;
+       act.sa_flags = SA_SIGINFO | SA_RESTART;
+       sigemptyset(&act.sa_mask);
        ret = sigaction(SIGURCU, &act, NULL);
        if (ret) {
                perror("Error in sigaction");
@@ -236,7 +463,7 @@ void __attribute__((constructor)) urcu_init(void)
        }
 }
 
-void __attribute__((destructor)) urcu_exit(void)
+void urcu_exit(void)
 {
        struct sigaction act;
        int ret;
@@ -247,5 +474,6 @@ void __attribute__((destructor)) urcu_exit(void)
                exit(-1);
        }
        assert(act.sa_sigaction == sigurcu_handler);
-       free(reader_data);
+       assert(list_empty(&registry));
 }
+#endif /* #ifndef URCU_MB */