*
* Userspace RCU library
*
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
* Written to only by writer with mutex taken. Read by both writer and readers.
*/
-long rcu_gp_ctr = RCU_GP_COUNT;
+unsigned long rcu_gp_ctr = RCU_GP_COUNT;
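+ /*
+  * Note: rcu_gp_ctr and the per-reader ctr use unsigned long so that the
+  * counter arithmetic wraps with well-defined semantics (signed overflow
+  * is undefined behaviour in C). Layout sketch, per the definitions in
+  * urcu-static.h:
+  *
+  *   RCU_GP_COUNT         (1UL << 0)                            one nesting level
+  *   RCU_GP_CTR_PHASE     (1UL << (sizeof(unsigned long) << 2)) grace period parity
+  *   RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_PHASE - 1)                nesting count bits
+  */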
/*
* Written to only by each individual reader. Read by both the reader and the
perror("Error in pthread mutex lock");
exit(-1);
}
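+ /*
+  * The writer may be waiting in force_mb_all_readers() for this thread
+  * to clear need_mb while we busy-wait on a mutex the writer holds.
+  * Servicing the barrier request in-line here avoids that deadlock
+  * instead of relying solely on the SIGRCU handler.
+  */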
- if (rcu_reader.need_mb) {
+ if (LOAD_SHARED(rcu_reader.need_mb)) {
smp_mb();
- rcu_reader.need_mb = 0;
+ _STORE_SHARED(rcu_reader.need_mb, 0);
smp_mb();
}
poll(NULL, 0, 10);
}
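+ /*
+  * Write-side barrier selection: each flavour provides smp_mb_master().
+  * RCU_MEMBARRIER uses sys_membarrier() when available, RCU_MB issues a
+  * plain smp_mb(), and the signal-based flavour forces a barrier on every
+  * registered reader through force_mb_all_readers() below. The group
+  * parameter tags the barrier pairing; every call site in this file
+  * passes RCU_MB_GROUP.
+  */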
#ifdef RCU_MEMBARRIER
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
if (likely(has_sys_membarrier))
membarrier(MEMBARRIER_EXPEDITED);
#endif
#ifdef RCU_MB
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
smp_mb();
}
- * safe and don't assume anything : we use smp_mc() to make sure the
- * cache flush is enforced.
+ * safe and don't assume anything: the cache flush implied by
+ * STORE_SHARED() below makes sure the need_mb update is enforced.
*/
- list_for_each_entry(index, &registry, head) {
- index->need_mb = 1;
- smp_mc(); /* write need_mb before sending the signal */
+ list_for_each_entry(index, &registry, node) {
+ STORE_SHARED(index->need_mb, 1);
pthread_kill(index->tid, SIGRCU);
}
/*
* relevant bug report. For Linux kernels, we recommend getting
* the Linux Test Project (LTP).
*/
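+ /*
+  * Re-sending SIGRCU while need_mb remains set is harmless: the handler
+  * (see the end of this file) only executes smp_mb() and clears the
+  * flag, so duplicate signals are idempotent.
+  */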
- list_for_each_entry(index, &registry, head) {
- while (index->need_mb) {
+ list_for_each_entry(index, &registry, node) {
+ while (LOAD_SHARED(index->need_mb)) {
pthread_kill(index->tid, SIGRCU);
poll(NULL, 0, 1);
}
smp_mb(); /* read ->need_mb before ending the barrier */
}
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
force_mb_all_readers();
}
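+ /*
+  * gp_futex protocol sketch: the writer decrements gp_futex to -1 and
+  * sleeps in FUTEX_WAIT below; the read side resets it to 0 and issues
+  * FUTEX_WAKE when the last blocking reader leaves its critical section
+  * (see wake_up_gp() in urcu-static.h).
+  */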
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
if (uatomic_read(&gp_futex) == -1)
futex_async(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
int wait_loops = 0;
struct rcu_reader *index, *tmp;
- /* Switch parity: 1 -> 0, 0 -> 1 */
+ /* Switch parity: 0 -> 1, 1 -> 0 */
STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
/*
- * Must commit qparity update to memory before waiting for other parity
- * quiescent state. Failure to do so could result in the writer waiting
- * forever while new readers are always accessing data (no progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
+ * Must commit rcu_gp_ctr update to memory before waiting for quiescent
+ * state. Failure to do so could result in the writer waiting forever
+ * while new readers are always accessing data (no progress). Enforce
+ * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
*/
+ barrier();
/*
* Adding a smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
}
- list_for_each_entry_safe(index, tmp, &registry, head) {
- if (!rcu_old_gp_ongoing(&index->ctr))
- list_move(&index->head, &qsreaders);
+ list_for_each_entry_safe(index, tmp, &registry, node) {
+ if (!rcu_gp_ongoing(&index->ctr))
+ list_move(&index->node, &qsreaders);
}
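+ /*
+  * Readers observed in a quiescent state are moved off the registry onto
+  * the local qsreaders list, so each rescan only revisits readers still
+  * holding up the grace period; qsreaders is spliced back into the
+  * registry once the wait completes.
+  */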
#ifndef HAS_INCOHERENT_CACHES
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
uatomic_set(&gp_futex, 0);
}
break;
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
uatomic_set(&gp_futex, 0);
}
break;
wait_gp();
break; /* only escape switch */
case KICK_READER_LOOPS:
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
wait_loops = 0;
break; /* only escape switch */
default:
* where new ptr points to. Must be done within rcu_gp_lock because it
* iterates on reader threads. */
/* Write new ptr before changing the qparity */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
/*
* Wait for previous parity to be empty of readers.
/*
* Must finish waiting for quiescent state for parity 0 before
- * committing qparity update to memory. Failure to do so could result in
- * the writer waiting forever while new readers are always accessing
- * data (no progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
+ * committing next rcu_gp_ctr update to memory. Failure to do so could
+ * result in the writer waiting forever while new readers are always
+ * accessing data (no progress). Enforce compiler-order of load
+ * rcu_reader ctr before store to rcu_gp_ctr.
*/
+ barrier();
/*
* Adding a smp_mb() which is _not_ formally required, but makes the
/* Finish waiting for reader threads before letting the old ptr be
* freed. Must be done within rcu_gp_lock because it iterates on reader
* threads. */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
out:
mutex_unlock(&rcu_gp_lock);
}
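+ /*
+  * Typical write-side usage of synchronize_rcu(), as a sketch; shared_ptr,
+  * old and new are placeholders, not part of this library:
+  *
+  *	old = shared_ptr;
+  *	new = malloc(sizeof(*new));
+  *	*new = *old;                          ... then modify the copy ...
+  *	rcu_assign_pointer(shared_ptr, new);
+  *	synchronize_rcu();                    wait for pre-existing readers
+  *	free(old);
+  */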
{
rcu_reader.tid = pthread_self();
assert(rcu_reader.need_mb == 0);
- assert(rcu_reader.ctr == 0);
+ assert(!(rcu_reader.ctr & RCU_GP_CTR_NEST_MASK));
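+ /*
+  * The assert above checks only the nesting count: rcu_read_unlock()
+  * subtracts RCU_GP_COUNT but leaves the phase bit in ctr, so a reader
+  * that registers again after earlier use may legitimately carry a stale
+  * phase bit.
+  */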
mutex_lock(&rcu_gp_lock);
rcu_init(); /* In case gcc does not support constructor attribute */
- list_add(&rcu_reader.head, &registry);
+ list_add(&rcu_reader.node, &registry);
mutex_unlock(&rcu_gp_lock);
}
void rcu_unregister_thread(void)
{
mutex_lock(&rcu_gp_lock);
- list_del(&rcu_reader.head);
+ list_del(&rcu_reader.node);
mutex_unlock(&rcu_gp_lock);
}
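+ /*
+  * Every thread calling rcu_read_lock()/rcu_read_unlock() must first
+  * register with rcu_register_thread(), and must call
+  * rcu_unregister_thread() before exiting, so the writer never follows
+  * a registry entry into freed thread-local storage.
+  */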
* executed on.
*/
smp_mb();
- rcu_reader.need_mb = 0;
+ _STORE_SHARED(rcu_reader.need_mb, 0);
smp_mb();
}