*
* Userspace RCU library
*
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
*/
#define _BSD_SOURCE
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
+#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
-#include "urcu-static.h"
+#include "urcu/wfcqueue.h"
+#include "urcu/map/urcu.h"
+#include "urcu/static/urcu.h"
+#include "urcu-pointer.h"
+#include "urcu/tls-compat.h"
+
+#include "urcu-die.h"
+#include "urcu-wait.h"
+
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
#include "urcu.h"
+#define _LGPL_SOURCE
+
+/*
+ * If a reader is really non-cooperative and refuses to commit its
+ * rcu_active_readers count to memory (there is no barrier in the reader
+ * per se), kick it after a few loops waiting for it.
+ */
+#define KICK_READER_LOOPS 10000
+
+/*
+ * Active attempts to check for reader Q.S. before calling futex().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
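+
+/*
+ * Rough sketch of the futex protocol these constants tune (see
+ * wake_up_gp() in urcu/static/urcu.h for the reader side): after
+ * RCU_QS_ACTIVE_ATTEMPTS polling attempts, the grace-period thread
+ * announces its intent to sleep and blocks:
+ *
+ *	uatomic_dec(&rcu_gp_futex);	/- 0 -> -1: writer will sleep -/
+ *	futex_async(&rcu_gp_futex, FUTEX_WAIT, -1, NULL, NULL, 0);
+ *
+ * A reader leaving its critical section that observes -1 resets the
+ * futex to 0 and issues a FUTEX_WAKE, unblocking the writer.
+ */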
#ifdef RCU_MEMBARRIER
static int init_done;
-int has_sys_membarrier;
+int rcu_has_sys_membarrier;
void __attribute__((constructor)) rcu_init(void);
#endif
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
-int gp_futex;
+int32_t rcu_gp_futex;
/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_PHASE.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
unsigned long rcu_gp_ctr = RCU_GP_COUNT;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
-struct rcu_reader __thread rcu_reader;
+DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
#ifdef DEBUG_YIELD
-unsigned int yield_active;
-unsigned int __thread rand_yield;
+unsigned int rcu_yield_active;
+DEFINE_URCU_TLS(unsigned int, rcu_rand_yield);
#endif
-static LIST_HEAD(registry);
+static CDS_LIST_HEAD(registry);
+
+/*
+ * Queue of threads waiting for a grace period. Contains
+ * struct gp_waiters_thread objects.
+ */
+static DEFINE_URCU_WAIT_QUEUE(gp_waiters);
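+
+/*
+ * Sketch of how gp_waiters batches grace periods (this mirrors the
+ * logic in synchronize_rcu() below): the first thread to enqueue its
+ * wait node becomes the leader and runs the grace period for everyone
+ * queued behind it; followers simply park, so N concurrent
+ * synchronize_rcu() calls cost one grace period instead of N:
+ *
+ *	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
+ *		urcu_adaptative_busy_wait(&wait);	/- follower: park -/
+ *		return;				/- GP already completed -/
+ *	}
+ *	/- leader: run the grace period, then release the followers -/
+ *	urcu_wake_all_waiters(&waiters);
+ */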
static void mutex_lock(pthread_mutex_t *mutex)
{
int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
ret = pthread_mutex_lock(mutex);
- if (ret) {
- perror("Error in pthread mutex lock");
- exit(-1);
- }
+ if (ret)
+ urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
while ((ret = pthread_mutex_trylock(mutex)) != 0) {
- if (ret != EBUSY && ret != EINTR) {
- printf("ret = %d, errno = %d\n", ret, errno);
- perror("Error in pthread mutex lock");
- exit(-1);
- }
- if (LOAD_SHARED(rcu_reader.need_mb)) {
- smp_mb();
- _STORE_SHARED(rcu_reader.need_mb, 0);
- smp_mb();
+ if (ret != EBUSY && ret != EINTR)
+ urcu_die(ret);
+ if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
+ cmm_smp_mb();
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
+ cmm_smp_mb();
}
poll(NULL,0,10);
}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
int ret;
ret = pthread_mutex_unlock(mutex);
- if (ret) {
- perror("Error in pthread mutex unlock");
- exit(-1);
- }
+ if (ret)
+ urcu_die(ret);
}
#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
- if (likely(has_sys_membarrier))
+ if (caa_likely(rcu_has_sys_membarrier))
membarrier(MEMBARRIER_EXPEDITED);
else
- smp_mb();
+ cmm_smp_mb();
}
#endif
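+
+/*
+ * Design note: with sys_membarrier() support, rcu_read_lock() and
+ * rcu_read_unlock() only need compiler barriers (cmm_barrier()); the
+ * membarrier(MEMBARRIER_EXPEDITED) call above promotes them to full
+ * memory barriers on every running thread. The barrier cost thus moves
+ * from every read-side critical section to each grace period, which
+ * favors read-mostly workloads.
+ */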
#ifdef RCU_MB
static void smp_mb_master(int group)
{
- smp_mb();
+ cmm_smp_mb();
}
#endif
#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
struct rcu_reader *index;
/*
- * Ask for each threads to execute a smp_mb() so we can consider the
+ * Ask each thread to execute a cmm_smp_mb() so we can consider the
* compiler barriers around rcu read lock as real memory barriers.
*/
- if (list_empty(®istry))
+ if (cds_list_empty(®istry))
return;
/*
- * pthread_kill has a smp_mb(). But beware, we assume it performs
+ * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
* a cache flush on architectures with non-coherent cache. Let's play
- * safe and don't assume anything : we use smp_mc() to make sure the
+ * safe and not assume anything: we use cmm_smp_mc() to make sure the
* cache flush is enforced.
*/
- list_for_each_entry(index, ®istry, node) {
- STORE_SHARED(index->need_mb, 1);
+ cds_list_for_each_entry(index, ®istry, node) {
+ CMM_STORE_SHARED(index->need_mb, 1);
pthread_kill(index->tid, SIGRCU);
}
/*
* relevant bug report. For Linux kernels, we recommend getting
* the Linux Test Project (LTP).
*/
- list_for_each_entry(index, ®istry, node) {
- while (LOAD_SHARED(index->need_mb)) {
+ cds_list_for_each_entry(index, ®istry, node) {
+ while (CMM_LOAD_SHARED(index->need_mb)) {
pthread_kill(index->tid, SIGRCU);
poll(NULL, 0, 1);
}
}
- smp_mb(); /* read ->need_mb before ending the barrier */
+ cmm_smp_mb(); /* read ->need_mb before ending the barrier */
}
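+
+/*
+ * The loop above implements the RCU_SIGNAL flavor of memory barrier
+ * broadcast: every registered reader is sent SIGRCU, then we poll until
+ * each one clears its need_mb flag from sigrcu_handler(), proving it
+ * executed a full cmm_smp_mb().
+ */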
static void smp_mb_master(int group)
{
force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
/* Read reader_gp before read futex */
smp_mb_master(RCU_MB_GROUP);
- if (uatomic_read(&gp_futex) == -1)
- futex_async(&gp_futex, FUTEX_WAIT, -1,
+ if (uatomic_read(&rcu_gp_futex) == -1)
+ futex_async(&rcu_gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
}
-void update_counter_and_wait(void)
+static void wait_for_readers(struct cds_list_head *input_readers,
+ struct cds_list_head *cur_snap_readers,
+ struct cds_list_head *qsreaders)
{
- LIST_HEAD(qsreaders);
int wait_loops = 0;
struct rcu_reader *index, *tmp;
- /* Switch parity: 0 -> 1, 1 -> 0 */
- STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
-
- /*
- * Must commit rcu_gp_ctr update to memory before waiting for quiescent
- * state. Failure to do so could result in the writer waiting forever
- * while new readers are always accessing data (no progress). Enforce
- * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
- */
- barrier();
-
/*
- *
- * Adding a smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- smp_mb();
-
- /*
- * Wait for each thread rcu_reader.ctr count to become 0.
+ * Wait for each thread URCU_TLS(rcu_reader).ctr to either
+ * indicate quiescence (not nested), or observe the current
+ * rcu_gp_ctr value.
*/
for (;;) {
wait_loops++;
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
- uatomic_dec(&gp_futex);
+ uatomic_dec(&rcu_gp_futex);
/* Write futex before read reader_gp */
smp_mb_master(RCU_MB_GROUP);
}
- list_for_each_entry_safe(index, tmp, ®istry, node) {
- if (!rcu_gp_ongoing(&index->ctr))
- list_move(&index->node, &qsreaders);
+ cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
+ switch (rcu_reader_state(&index->ctr)) {
+ case RCU_READER_ACTIVE_CURRENT:
+ if (cur_snap_readers) {
+ cds_list_move(&index->node,
+ cur_snap_readers);
+ break;
+ }
+ /* Fall-through */
+ case RCU_READER_INACTIVE:
+ cds_list_move(&index->node, qsreaders);
+ break;
+ case RCU_READER_ACTIVE_OLD:
+ /*
+ * Old snapshot. Leaving node in
+ * input_readers will make us busy-loop
+ * until the snapshot becomes current or
+ * the reader becomes inactive.
+ */
+ break;
+ }
}
#ifndef HAS_INCOHERENT_CACHES
- if (list_empty(®istry)) {
+ if (cds_list_empty(input_readers)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
smp_mb_master(RCU_MB_GROUP);
- uatomic_set(&gp_futex, 0);
+ uatomic_set(&rcu_gp_futex, 0);
}
break;
} else {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
wait_gp();
else
- cpu_relax();
+ caa_cpu_relax();
}
#else /* #ifndef HAS_INCOHERENT_CACHES */
/*
* BUSY-LOOP. Force the reader thread to commit its
- * rcu_reader.ctr update to memory if we wait for too long.
+ * URCU_TLS(rcu_reader).ctr update to memory if we wait
+ * for too long.
*/
- if (list_empty(®istry)) {
+ if (cds_list_empty(input_readers)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
smp_mb_master(RCU_MB_GROUP);
- uatomic_set(&gp_futex, 0);
+ uatomic_set(&rcu_gp_futex, 0);
}
break;
} else {
switch (wait_loops) {
case RCU_QS_ACTIVE_ATTEMPTS:
wait_gp();
break; /* only escape switch */
case KICK_READER_LOOPS:
smp_mb_master(RCU_MB_GROUP);
wait_loops = 0;
break; /* only escape switch */
default:
- cpu_relax();
+ caa_cpu_relax();
}
}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
}
- /* put back the reader list in the registry */
- list_splice(&qsreaders, ®istry);
}
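+
+/*
+ * Note: wait_for_readers() is invoked twice per grace period by
+ * synchronize_rcu() below, with a RCU_GP_CTR_PHASE flip in between.
+ * Readers seen with a current snapshot in the first pass are parked on
+ * cur_snap_readers and waited for again after the flip, so that a
+ * read-side critical section overlapping the flip can never be
+ * mistaken for a new, post-grace-period reader.
+ */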
void synchronize_rcu(void)
{
+ CDS_LIST_HEAD(cur_snap_readers);
+ CDS_LIST_HEAD(qsreaders);
+ DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
+ struct urcu_waiters waiters;
+
+ /*
+ * Add ourselves to the gp_waiters queue of threads waiting
+ * for a grace period. Proceed to perform the grace period only
+ * if we are the first thread added into the queue.
+ * The implicit memory barrier before urcu_wait_add()
+ * orders prior memory accesses of threads put into the wait
+ * queue before their insertion into the wait queue.
+ */
+ if (urcu_wait_add(&gp_waiters, &wait) != 0) {
+ /* Not first in queue: will be awakened by another thread. */
+ urcu_adaptative_busy_wait(&wait);
+ /* Order following memory accesses after grace period. */
+ cmm_smp_mb();
+ return;
+ }
+ /* We won't need to wake ourselves up */
+ urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);
+
mutex_lock(&rcu_gp_lock);
- if (list_empty(®istry))
+ /*
+ * Move all waiters into our local queue.
+ */
+ urcu_move_waiters(&waiters, &gp_waiters);
+
+ if (cds_list_empty(®istry))
goto out;
/* All threads should read qparity before accessing data structure
 * where new ptr points to. Must be done within rcu_gp_lock because it
 * iterates on reader threads. */
/* Write new ptr before changing the qparity */
smp_mb_master(RCU_MB_GROUP);
/*
- * Wait for previous parity to be empty of readers.
+ * Wait for readers to observe original parity or be quiescent.
*/
- update_counter_and_wait(); /* 0 -> 1, wait readers in parity 0 */
+ wait_for_readers(®istry, &cur_snap_readers, &qsreaders);
/*
- * Must finish waiting for quiescent state for parity 0 before
+ * Must finish waiting for quiescent state for original parity before
* committing next rcu_gp_ctr update to memory. Failure to do so could
* result in the writer waiting forever while new readers are always
* accessing data (no progress). Enforce compiler-order of load
- * rcu_reader ctr before store to rcu_gp_ctr.
+ * URCU_TLS(rcu_reader).ctr before store to rcu_gp_ctr.
+ */
+ cmm_barrier();
+
+ /*
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
+
+ /* Switch parity: 0 -> 1, 1 -> 0 */
+ CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+
+ /*
+ * Must commit rcu_gp_ctr update to memory before waiting for quiescent
+ * state. Failure to do so could result in the writer waiting forever
+ * while new readers are always accessing data (no progress). Enforce
+ * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
*/
- barrier();
+ cmm_barrier();
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
+ cmm_smp_mb();
/*
- * Wait for previous parity to be empty of readers.
+ * Wait for readers to observe new parity or be quiescent.
*/
- update_counter_and_wait(); /* 1 -> 0, wait readers in parity 1 */
+ wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
+
+ /*
+ * Put quiescent reader list back into registry.
+ */
+ cds_list_splice(&qsreaders, ®istry);
/* Finish waiting for reader threads before letting the old ptr be
 * freed. Must be done within rcu_gp_lock because it iterates on reader
 * threads. */
smp_mb_master(RCU_MB_GROUP);
out:
mutex_unlock(&rcu_gp_lock);
+
+ /*
+ * Wakeup waiters only after we have completed the grace period
+ * and have ensured the memory barriers at the end of the grace
+ * period have been issued.
+ */
+ urcu_wake_all_waiters(&waiters);
}
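+
+/*
+ * Illustrative updater-side usage (sketch; gp_ptr and struct mynode
+ * are hypothetical application names):
+ *
+ *	struct mynode *old = rcu_dereference(gp_ptr);
+ *	rcu_assign_pointer(gp_ptr, new_node);
+ *	synchronize_rcu();	/- wait for pre-existing readers -/
+ *	free(old);		/- no reader can still reference it -/
+ */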
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
_rcu_read_lock();
}

void rcu_read_unlock(void)
{
_rcu_read_unlock();
}
+int rcu_read_ongoing(void)
+{
+ return _rcu_read_ongoing();
+}
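+
+/*
+ * Illustrative use of rcu_read_ongoing(): assert, from code that
+ * dereferences RCU-protected pointers, that the caller really is
+ * inside a read-side critical section (gp_ptr is a hypothetical name):
+ *
+ *	assert(rcu_read_ongoing());
+ *	node = rcu_dereference(gp_ptr);
+ */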
+
void rcu_register_thread(void)
{
- rcu_reader.tid = pthread_self();
- assert(rcu_reader.need_mb == 0);
- assert(!(rcu_reader.ctr & RCU_GP_CTR_NEST_MASK));
+ URCU_TLS(rcu_reader).tid = pthread_self();
+ assert(URCU_TLS(rcu_reader).need_mb == 0);
+ assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));
mutex_lock(&rcu_gp_lock);
rcu_init(); /* In case gcc does not support constructor attribute */
- list_add(&rcu_reader.node, ®istry);
+ cds_list_add(&URCU_TLS(rcu_reader).node, ®istry);
mutex_unlock(&rcu_gp_lock);
}
void rcu_unregister_thread(void)
{
mutex_lock(&rcu_gp_lock);
- list_del(&rcu_reader.node);
+ cds_list_del(&URCU_TLS(rcu_reader).node);
mutex_unlock(&rcu_gp_lock);
}
#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
if (init_done)
return;
init_done = 1;
if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
- has_sys_membarrier = 1;
+ rcu_has_sys_membarrier = 1;
}
#endif
#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
/*
- * Executing this smp_mb() is the only purpose of this signal handler.
- * It punctually promotes barrier() into smp_mb() on every thread it is
+ * Executing this cmm_smp_mb() is the only purpose of this signal handler.
+ * It punctually promotes cmm_barrier() into cmm_smp_mb() on every thread it is
* executed on.
*/
- smp_mb();
- _STORE_SHARED(rcu_reader.need_mb, 0);
- smp_mb();
+ cmm_smp_mb();
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
+ cmm_smp_mb();
}
/*
 * rcu_init constructor: install the SIGRCU signal handler (RCU_SIGNAL
 * flavor).
 */
void rcu_init(void)
{
struct sigaction act;
int ret;

act.sa_sigaction = sigrcu_handler;
act.sa_flags = SA_SIGINFO | SA_RESTART;
sigemptyset(&act.sa_mask);
ret = sigaction(SIGRCU, &act, NULL);
- if (ret) {
- perror("Error in sigaction");
- exit(-1);
- }
+ if (ret)
+ urcu_die(errno);
}
void rcu_exit(void)
{
struct sigaction act;
int ret;
ret = sigaction(SIGRCU, NULL, &act);
- if (ret) {
- perror("Error in sigaction");
- exit(-1);
- }
+ if (ret)
+ urcu_die(errno);
assert(act.sa_sigaction == sigrcu_handler);
- assert(list_empty(®istry));
+ assert(cds_list_empty(®istry));
}
+
#endif /* #ifdef RCU_SIGNAL */
+
+DEFINE_RCU_FLAVOR(rcu_flavor);
+
+#include "urcu-call-rcu-impl.h"
+#include "urcu-defer-impl.h"