* Also has an RCU_GP_COUNT of 1, to accelerate the reader fast path.
* Written to only by the writer with the mutex taken. Read by both the writer
* and the readers.
*/
-long rcu_gp_ctr = RCU_GP_COUNT;
+unsigned long rcu_gp_ctr = RCU_GP_COUNT;
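/*
 * The switch to unsigned long matters: the counter is advanced by addition
 * and XOR and checked through masks, and unsigned wraparound is well defined
 * in C while signed overflow is not. A reader publishes a snapshot of this
 * counter on its outermost rcu_read_lock(); a minimal sketch of that fast
 * path (simplified from the nesting-aware version; the helper name is
 * illustrative only):
 */
static inline void rcu_read_lock_outermost_sketch(void)
{
	/* Copy the current phase plus an RCU_GP_COUNT of 1 into the
	 * per-reader counter. */
	_STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
	/* Order the snapshot before the reads in the critical section. */
	smp_mb();
}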
/*
* Written to only by each individual reader. Read by both the reader and the
* writer.
perror("Error in pthread mutex lock");
exit(-1);
}
- if (rcu_reader.need_mb) {
+ if (LOAD_SHARED(rcu_reader.need_mb)) {
smp_mb();
- rcu_reader.need_mb = 0;
+ _STORE_SHARED(rcu_reader.need_mb, 0);
smp_mb();
}
poll(NULL, 0, 10);
}
}
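/*
 * Rough shape of the shared-access helpers used above (simplified; on
 * architectures with non-coherent caches the real macros also issue
 * cache-coherency primitives such as smp_rmc()/smp_wmc()):
 *
 *   _LOAD_SHARED(p)      volatile load, no barrier
 *   LOAD_SHARED(p)       volatile load plus read cache-coherency primitive
 *   _STORE_SHARED(x, v)  volatile store, no barrier
 *   STORE_SHARED(x, v)   volatile store plus write cache-coherency primitive
 *
 * The explicit smp_mb() pair above supplies the ordering; the accessors only
 * keep the compiler from caching or tearing need_mb.
 */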
-/*
- * called with rcu_gp_lock held.
- */
-static void switch_next_rcu_qparity(void)
-{
- STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
-}
-
#ifdef RCU_MEMBARRIER
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
if (likely(has_sys_membarrier))
membarrier(MEMBARRIER_EXPEDITED);
#endif
#ifdef RCU_MB
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
smp_mb();
}
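/*
 * The new int group argument (always RCU_MB_GROUP at the call sites below)
 * selects which group of barriers must be promoted; the RCU_MEMBARRIER
 * flavor above presumably falls back to a plain smp_mb() when
 * sys_membarrier() is unavailable (that branch is elided from this excerpt).
 */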
* cache flush is enforced.
*/
list_for_each_entry(index, &registry, head) {
- index->need_mb = 1;
- smp_mc(); /* write need_mb before sending the signal */
+ STORE_SHARED(index->need_mb, 1);
pthread_kill(index->tid, SIGRCU);
}
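/*
 * STORE_SHARED() folds in the write cache-flush primitive that the removed
 * explicit smp_mc() used to provide, so need_mb is still committed to
 * memory before pthread_kill() delivers SIGRCU to the reader.
 */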
/*
* the Linux Test Project (LTP).
*/
list_for_each_entry(index, &registry, head) {
- while (index->need_mb) {
+ while (LOAD_SHARED(index->need_mb)) {
pthread_kill(index->tid, SIGRCU);
poll(NULL, 0, 1);
}
smp_mb(); /* read ->need_mb before ending the barrier */
}
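/*
 * The handshake completes in the SIGRCU handler at the end of this patch:
 * the reader issues smp_mb() and clears need_mb with _STORE_SHARED(), which
 * is exactly what the LOAD_SHARED() poll above waits to observe.
 */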
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
force_mb_all_readers();
}
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
if (uatomic_read(&gp_futex) == -1)
futex_async(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
}
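/*
 * The writer only parks on gp_futex after advertising -1 with uatomic_dec()
 * below; the read side is expected to wake it on the outermost
 * rcu_read_unlock(). A minimal sketch of that wake-up path, assuming the
 * same futex_async() wrapper (the helper name is illustrative):
 */
static inline void wake_up_gp_sketch(void)
{
	if (unlikely(uatomic_read(&gp_futex) == -1)) {
		uatomic_set(&gp_futex, 0);
		/* Wake the writer blocked in wait_gp(). */
		futex_async(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}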
-void wait_for_quiescent_state(void)
+void update_counter_and_wait(void)
{
LIST_HEAD(qsreaders);
int wait_loops = 0;
struct rcu_reader *index, *tmp;
- if (list_empty(&registry))
- return;
+ /* Switch parity: 0 -> 1, 1 -> 0 */
+ STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+
+ /*
+ * Must commit qparity update to memory before waiting for other parity
+ * quiescent state. Failure to do so could result in the writer waiting
+ * forever while new readers are always accessing data (no progress).
+ * Ensured by STORE_SHARED and LOAD_SHARED.
+ */
+
+ /*
+ * Adding a smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ smp_mb();
+
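+ /*
+ * From this point on, a reader entering its outermost rcu_read_lock()
+ * snapshots the flipped rcu_gp_ctr, so only readers that were already in a
+ * critical section can still carry the old parity bit; the wait below is
+ * therefore bounded.
+ */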
/*
* Wait for each thread rcu_reader.ctr count to become 0.
*/
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
}
list_for_each_entry_safe(index, tmp, &registry, head) {
- if (!rcu_old_gp_ongoing(&index->ctr))
+ if (!rcu_gp_ongoing(&index->ctr))
list_move(&index->head, &qsreaders);
}
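/*
 * rcu_gp_ongoing() reports true only when a reader has a non-zero nesting
 * count and its snapshotted parity differs from the current one, roughly:
 *   (v & RCU_GP_CTR_NEST_MASK) && ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE)
 * Quiescent readers are moved to qsreaders so later iterations of the wait
 * loop skip them.
 */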
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
uatomic_set(&gp_futex, 0);
}
break;
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
uatomic_set(&gp_futex, 0);
}
break;
wait_gp();
break; /* only escape switch */
case KICK_READER_LOOPS:
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
wait_loops = 0;
break; /* only escape switch */
default:
{
mutex_lock(&rcu_gp_lock);
+ if (list_empty(&registry))
+ goto out;
+
/* All threads should read qparity before accessing the data structure
 * pointed to by the new ptr. Must be done within rcu_gp_lock because it
 * iterates on reader threads. */
/* Write new ptr before changing the qparity */
- smp_mb_heavy();
-
- switch_next_rcu_qparity(); /* 0 -> 1 */
-
- /*
- * Must commit qparity update to memory before waiting for parity
- * 0 quiescent state. Failure to do so could result in the writer
- * waiting forever while new readers are always accessing data (no
- * progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
- */
-
- /*
- * Adding a smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- smp_mb();
+ smp_mb_master(RCU_MB_GROUP);
/*
* Wait for previous parity to be empty of readers.
*/
- wait_for_quiescent_state(); /* Wait readers in parity 0 */
+ update_counter_and_wait(); /* 0 -> 1, wait readers in parity 0 */
/*
 * Must finish waiting for quiescent state for parity 0 before committing
 * the next rcu_gp_ctr update to memory; failure to do so could result in
 * the writer waiting forever while new readers keep accessing the data
 * (no progress).
 */
smp_mb();
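/*
 * One way to see why two flips are needed: a reader may fetch rcu_gp_ctr
 * just before the first flip and publish that stale snapshot only after the
 * writer has started waiting, so a single flip could miss it; waiting on
 * both parities guarantees every reader that may still hold a pre-update
 * reference has left its critical section.
 */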
- switch_next_rcu_qparity(); /* 1 -> 0 */
-
- /*
- * Must commit qparity update to memory before waiting for parity
- * 1 quiescent state. Failure to do so could result in the writer
- * waiting forever while new readers are always accessing data (no
- * progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
- */
-
- /*
- * Adding a smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- smp_mb();
-
/*
* Wait for previous parity to be empty of readers.
*/
- wait_for_quiescent_state(); /* Wait readers in parity 1 */
+ update_counter_and_wait(); /* 1 -> 0, wait readers in parity 1 */
/* Finish waiting for reader threads before letting the old ptr be freed.
 * Must be done within rcu_gp_lock because it iterates on reader
 * threads. */
- smp_mb_heavy();
-
+ smp_mb_master(RCU_MB_GROUP);
+out:
mutex_unlock(&rcu_gp_lock);
}
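/*
 * Typical writer-side use of synchronize_rcu() above, as a hypothetical
 * example; struct node, global_ptr and writer_lock are illustrative names,
 * and rcu_assign_pointer() is the library's publication primitive:
 */
struct node { int data; };
static struct node *global_ptr;
static pthread_mutex_t writer_lock = PTHREAD_MUTEX_INITIALIZER;

static void writer_update_sketch(struct node *new_node)
{
	struct node *old;

	pthread_mutex_lock(&writer_lock);
	old = global_ptr;
	rcu_assign_pointer(global_ptr, new_node);	/* publish new version */
	pthread_mutex_unlock(&writer_lock);
	synchronize_rcu();	/* wait out all pre-existing readers */
	free(old);		/* safe: no reader can still see old */
}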
* executed on.
*/
smp_mb();
- rcu_reader.need_mb = 0;
+ _STORE_SHARED(rcu_reader.need_mb, 0);
smp_mb();
}
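/*
 * For the signal-based flavor, the handler above is assumed to be installed
 * once at library initialization, before any reader registers. A minimal
 * sketch of that registration, assuming <signal.h>/<string.h> are included
 * and sigrcu_handler is the name of the handler above (SIGRCU is typically
 * an alias for SIGUSR1):
 */
static void install_sigrcu_handler_sketch(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO;
	sigemptyset(&act.sa_mask);
	if (sigaction(SIGRCU, &act, NULL))
		perror("sigaction");
}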