* Global grace period counter.
* Contains the current RCU_GP_CTR_BIT.
* Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
+ * Written to only by writer with mutex taken. Read by both writer and readers.
*/
long urcu_gp_ctr = RCU_GP_COUNT;
+/*
+ * Written to only by each individual reader. Read by both the reader and the
+ * writers.
+ */
long __thread urcu_active_readers;
/* Thread IDs of registered readers */
/*
 * Flip the grace-period parity bit in urcu_gp_ctr. The XOR-and-store is
 * done through STORE_SHARED so the update is published to concurrently
 * running readers (see the urcu_gp_ctr comment above: written only by the
 * writer with the mutex held, read by readers).
 */
static void switch_next_urcu_qparity(void)
{
- urcu_gp_ctr ^= RCU_GP_CTR_BIT;
+ STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}
#ifdef DEBUG_FULL_MB
{
assert(reader_data);
sig_done = 0;
- smp_mb(); /* write sig_done before sending the signals */
+ /*
+ * pthread_kill has a smp_mb(). But beware, we assume it performs
+ * a cache flush on architectures with non-coherent cache. Let's play
+ * safe and don't assume anything : we use smp_mc() to make sure the
+ * cache flush is enforced.
+ * smp_mb(); write sig_done before sending the signals
+ */
+ smp_mc(); /* write sig_done before sending the signals */
pthread_kill(tid, SIGURCU);
/*
* Wait for sighandler (and thus mb()) to execute on every thread.
* BUSY-LOOP.
*/
- while (LOAD_REMOTE(sig_done) < 1)
+ while (LOAD_SHARED(sig_done) < 1)
cpu_relax();
smp_mb(); /* read sig_done before ending the barrier */
}
sig_done = 0;
/*
* pthread_kill has a smp_mb(). But beware, we assume it performs
- * a cache flush on architectures with non-coherent cache.
+ * a cache flush on architectures with non-coherent cache. Let's play
+ * safe and don't assume anything : we use smp_mc() to make sure the
+ * cache flush is enforced.
* smp_mb(); write sig_done before sending the signals
*/
+ smp_mc(); /* write sig_done before sending the signals */
for (index = reader_data; index < reader_data + num_readers; index++)
pthread_kill(index->tid, SIGURCU);
/*
* Wait for sighandler (and thus mb()) to execute on every thread.
* BUSY-LOOP.
*/
- while (LOAD_REMOTE(sig_done) < num_readers)
+ while (LOAD_SHARED(sig_done) < num_readers)
cpu_relax();
smp_mb(); /* read sig_done before ending the barrier */
}
if (wait_loops++ == KICK_READER_LOOPS) {
force_mb_single_thread(index->tid);
wait_loops = 0;
+ } else {
+ cpu_relax();
}
}
}
* 0 quiescent state. Failure to do so could result in the writer
* waiting forever while new readers are always accessing data (no
* progress).
+ * Ensured by STORE_SHARED and LOAD_SHARED.
*/
- smp_mc();
/*
* Wait for previous parity to be empty of readers.
* committing qparity update to memory. Failure to do so could result in
* the writer waiting forever while new readers are always accessing
* data (no progress).
+ * Ensured by STORE_SHARED and LOAD_SHARED.
*/
- smp_mc();
switch_next_urcu_qparity(); /* 1 -> 0 */
* 1 quiescent state. Failure to do so could result in the writer
* waiting forever while new readers are always accessing data (no
* progress).
+ * Ensured by STORE_SHARED and LOAD_SHARED.
*/
- smp_mc();
/*
* Wait for previous parity to be empty of readers.