#include "urcu/static/urcu-qsbr.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"
+#include "urcu/waitqueue-lifo.h"
#include "urcu-die.h"
void __attribute__((destructor)) rcu_exit(void);
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
-
-int32_t rcu_gp_futex;
-
-/*
- * Global grace period counter.
- */
-unsigned long rcu_gp_ctr = RCU_GP_ONLINE;
+struct rcu_gp rcu_gp = { .ctr = RCU_GP_ONLINE };
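/*
 * Illustrative sketch, not part of this patch: based on the accesses
 * made below (rcu_gp.ctr and rcu_gp.futex), the struct rcu_gp type
 * declared in urcu/static/urcu-qsbr.h bundles the global grace period
 * counter with the futex a grace period waiter blocks on, roughly:
 */
#if 0	/* illustration only */
struct rcu_gp {
	unsigned long ctr;	/* global grace period counter */
	int32_t futex;		/* -1 while a grace period waiter sleeps */
};
#endif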
/*
* Active attempts to check for reader Q.S. before calling futex().
static CDS_LIST_HEAD(registry);
+/*
+ * Queue keeping threads awaiting to wait for a grace period. Contains
+ * struct gp_waiters_thread objects.
+ */
+static DEFINE_URCU_WAIT_QUEUE(gp_waiters);
+
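/*
 * Illustrative sketch, not part of this patch, of how the gp_waiters
 * stack is used by the synchronize_rcu() changes below: concurrent
 * callers each push a wait node; only the caller whose push found the
 * stack empty performs the grace period, the others busy-wait
 * adaptively until the leader wakes them, so many callers can share a
 * single grace period.  The function name below is only for
 * illustration.
 */
#if 0	/* illustration only */
static void grace_period_batching_sketch(void)
{
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: the leader runs the GP for us. */
		urcu_adaptative_busy_wait(&wait);
		return;
	}
	/* First in queue: we are the leader, no self wake-up needed. */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);
	/* Snapshot current waiters; later arrivals form the next batch. */
	urcu_move_waiters(&waiters, &gp_waiters);

	/* ... wait for all registered readers, as in synchronize_rcu() ... */

	mutex_unlock(&rcu_gp_lock);
	/* Release every caller batched into this grace period. */
	urcu_wake_all_waiters(&waiters);
}
#endif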
static void mutex_lock(pthread_mutex_t *mutex)
{
int ret;
{
/* Read reader_gp before read futex */
cmm_smp_rmb();
- if (uatomic_read(&rcu_gp_futex) == -1)
- futex_noasync(&rcu_gp_futex, FUTEX_WAIT, -1,
+ if (uatomic_read(&rcu_gp.futex) == -1)
+ futex_noasync(&rcu_gp.futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
}
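/*
 * Illustrative sketch, not part of this patch: wait_gp() above is the
 * blocking half of an adaptive wait.  After RCU_QS_ACTIVE_ATTEMPTS
 * rounds of active polling, wait_for_readers() below arms rcu_gp.futex
 * (sets it to -1) and the grace period waiter sleeps in wait_gp(); a
 * reader passing through a quiescent state then resets the futex and
 * issues a FUTEX_WAKE.  The reader-side counterpart lives in
 * urcu/static/urcu-qsbr.h and amounts to roughly the following
 * (function name and details are only for illustration):
 */
#if 0	/* illustration only */
static void reader_wakes_writer_sketch(void)
{
	/* The reader has just stored its quiescent state into its ctr. */
	cmm_smp_mb();	/* order ctr store before reading the futex */
	if (uatomic_read(&rcu_gp.futex) == -1) {
		uatomic_set(&rcu_gp.futex, 0);
		futex_noasync(&rcu_gp.futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}
#endif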
-static void wait_for_readers(void)
+static void wait_for_readers(struct cds_list_head *input_readers,
+ struct cds_list_head *cur_snap_readers,
+ struct cds_list_head *qsreaders)
{
- CDS_LIST_HEAD(qsreaders);
- int wait_loops = 0;
+ unsigned int wait_loops = 0;
struct rcu_reader *index, *tmp;
/*
* Wait for each thread's URCU_TLS(rcu_reader).ctr to either
* indicate quiescence (offline), or to observe the
- * current rcu_gp_ctr value.
+ * current rcu_gp.ctr value.
*/
for (;;) {
- wait_loops++;
+ if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+ wait_loops++;
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
- uatomic_set(&rcu_gp_futex, -1);
+ uatomic_set(&rcu_gp.futex, -1);
/*
* Write futex before write waiting (the other side
* reads them in the opposite order).
*/
cmm_smp_wmb();
- cds_list_for_each_entry(index, &registry, node) {
+ cds_list_for_each_entry(index, input_readers, node) {
_CMM_STORE_SHARED(index->waiting, 1);
}
/* Write futex before read reader_gp */
cmm_smp_mb();
}
- cds_list_for_each_entry_safe(index, tmp, &registry, node) {
- if (!rcu_gp_ongoing(&index->ctr))
- cds_list_move(&index->node, &qsreaders);
+ cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
+ switch (rcu_reader_state(&index->ctr)) {
+ case RCU_READER_ACTIVE_CURRENT:
+ if (cur_snap_readers) {
+ cds_list_move(&index->node,
+ cur_snap_readers);
+ break;
+ }
+ /* Fall-through */
+ case RCU_READER_INACTIVE:
+ cds_list_move(&index->node, qsreaders);
+ break;
+ case RCU_READER_ACTIVE_OLD:
+ /*
+ * Old snapshot. Leaving node in
+ * input_readers will make us busy-loop
+ * until the snapshot becomes current or
+ * the reader becomes inactive.
+ */
+ break;
+ }
}
- if (cds_list_empty(&registry)) {
+ if (cds_list_empty(input_readers)) {
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
cmm_smp_mb();
- uatomic_set(&rcu_gp_futex, 0);
+ uatomic_set(&rcu_gp.futex, 0);
}
break;
} else {
}
}
}
- /* put back the reader list in the registry */
- cds_list_splice(&qsreaders, &registry);
}
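/*
 * Illustrative sketch, not part of this patch: rcu_reader_state(),
 * used in wait_for_readers() above, comes from
 * urcu/static/urcu-qsbr.h.  In the QSBR scheme a reader's ctr is 0
 * while the thread is offline, equals rcu_gp.ctr once the thread has
 * observed the current grace period, and otherwise still holds an
 * older snapshot.  The classification therefore amounts to roughly
 * the following (function name and exact types are only for
 * illustration):
 */
#if 0	/* illustration only */
static int classify_reader_sketch(unsigned long *ctr)
{
	unsigned long v = CMM_LOAD_SHARED(*ctr);

	if (!v)
		return RCU_READER_INACTIVE;	/* offline: quiescent */
	if (v == rcu_gp.ctr)			/* rcu_gp_lock held by caller */
		return RCU_READER_ACTIVE_CURRENT;
	return RCU_READER_ACTIVE_OLD;		/* old snapshot: keep waiting */
}
#endif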
/*
#if (CAA_BITS_PER_LONG < 64)
void synchronize_rcu(void)
{
+ CDS_LIST_HEAD(cur_snap_readers);
+ CDS_LIST_HEAD(qsreaders);
unsigned long was_online;
+ DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
+ struct urcu_waiters waiters;
- was_online = URCU_TLS(rcu_reader).ctr;
+ was_online = rcu_read_ongoing();
/* All threads should read qparity before accessing the data structure
* pointed to by the new ptr. In the "then" case, rcu_thread_offline
else
cmm_smp_mb();
+ /*
+ * Add ourselves to the gp_waiters queue of threads awaiting to wait
+ * for a grace period. Proceed to perform the grace period only
+ * if we are the first thread added into the queue.
+ */
+ if (urcu_wait_add(&gp_waiters, &wait) != 0) {
+ /* Not first in queue: will be awakened by another thread. */
+ urcu_adaptative_busy_wait(&wait);
+ goto gp_end;
+ }
+ /* We won't need to wake ourselves up */
+ urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);
+
mutex_lock(&rcu_gp_lock);
+ /*
+ * Move all waiters into our local queue.
+ */
+ urcu_move_waiters(&waiters, &gp_waiters);
+
if (cds_list_empty(&registry))
goto out;
/*
* Wait for readers to observe original parity or be quiescent.
*/
- wait_for_readers();
+ wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
/*
* Must finish waiting for quiescent state for original parity
- * before committing next rcu_gp_ctr update to memory. Failure
+ * before committing next rcu_gp.ctr update to memory. Failure
* to do so could result in the writer waiting forever while new
* readers are always accessing data (no progress). Enforce
* compiler-order of load URCU_TLS(rcu_reader).ctr before store
- * to rcu_gp_ctr.
+ * to rcu_gp.ctr.
*/
cmm_barrier();
cmm_smp_mb();
/* Switch parity: 0 -> 1, 1 -> 0 */
- CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+ CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR);
/*
- * Must commit rcu_gp_ctr update to memory before waiting for
+ * Must commit rcu_gp.ctr update to memory before waiting for
* quiescent state. Failure to do so could result in the writer
* waiting forever while new readers are always accessing data
- * (no progress). Enforce compiler-order of store to rcu_gp_ctr
+ * (no progress). Enforce compiler-order of store to rcu_gp.ctr
* before load URCU_TLS(rcu_reader).ctr.
*/
cmm_barrier();
/*
* Wait for readers to observe new parity or be quiescent.
*/
- wait_for_readers();
+ wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
+
+ /*
+ * Put quiescent reader list back into registry.
+ */
+ cds_list_splice(&qsreaders, &registry);
out:
mutex_unlock(&rcu_gp_lock);
-
+ urcu_wake_all_waiters(&waiters);
+gp_end:
/*
* Finish waiting for reader threads before letting the old ptr be
* freed.
#else /* !(CAA_BITS_PER_LONG < 64) */
void synchronize_rcu(void)
{
+ CDS_LIST_HEAD(qsreaders);
unsigned long was_online;
+ DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
+ struct urcu_waiters waiters;
- was_online = URCU_TLS(rcu_reader).ctr;
+ was_online = rcu_read_ongoing();
/*
* Mark the writer thread offline to make sure we don't wait for
else
cmm_smp_mb();
+ /*
+ * Add ourselves to the gp_waiters queue of threads awaiting to wait
+ * for a grace period. Proceed to perform the grace period only
+ * if we are the first thread added into the queue.
+ */
+ if (urcu_wait_add(&gp_waiters, &wait) != 0) {
+ /* Not first in queue: will be awakened by another thread. */
+ urcu_adaptative_busy_wait(&wait);
+ goto gp_end;
+ }
+ /* We won't need to wake ourselves up */
+ urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);
+
mutex_lock(&rcu_gp_lock);
+
+ /*
+ * Move all waiters into our local queue.
+ */
+ urcu_move_waiters(&waiters, &gp_waiters);
+
if (cds_list_empty(&registry))
goto out;
/* Increment current G.P. */
- CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+ CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr + RCU_GP_CTR);
/*
- * Must commit rcu_gp_ctr update to memory before waiting for
+ * Must commit rcu_gp.ctr update to memory before waiting for
* quiescent state. Failure to do so could result in the writer
* waiting forever while new readers are always accessing data
- * (no progress). Enforce compiler-order of store to rcu_gp_ctr
+ * (no progress). Enforce compiler-order of store to rcu_gp.ctr
* before load URCU_TLS(rcu_reader).ctr.
*/
cmm_barrier();
/*
* Wait for readers to observe new count or be quiescent.
*/
- wait_for_readers();
+ wait_for_readers(&registry, NULL, &qsreaders);
+
+ /*
+ * Put quiescent reader list back into registry.
+ */
+ cds_list_splice(&qsreaders, &registry);
out:
mutex_unlock(&rcu_gp_lock);
-
+ urcu_wake_all_waiters(&waiters);
+gp_end:
if (was_online)
rcu_thread_online();
else
_rcu_read_unlock();
}
+int rcu_read_ongoing(void)
+{
+ return _rcu_read_ongoing();
+}
+
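/*
 * Illustrative sketch, not part of this patch: the grace periods
 * implemented above only make progress if reader threads report
 * quiescent states.  A QSBR reader thread typically registers itself,
 * brackets its accesses with rcu_read_lock()/rcu_read_unlock() (no-ops
 * in this flavor), and periodically calls rcu_quiescent_state(), or
 * goes offline with rcu_thread_offline()/rcu_thread_online() around
 * long blocking calls.  "struct mystruct", "shared_ptr" and the loop
 * bound below are hypothetical.
 */
#if 0	/* illustration only */
struct mystruct { int value; };
static struct mystruct *shared_ptr;	/* RCU-protected pointer */

static void *qsbr_reader_thread(void *arg)
{
	int i;

	(void) arg;
	rcu_register_thread();		/* join the reader registry walked above */
	for (i = 0; i < 1000; i++) {
		struct mystruct *p;

		rcu_read_lock();	/* delimits the read side (no-op in QSBR) */
		p = rcu_dereference(shared_ptr);
		if (p)
			(void) p->value;	/* safe to use p inside the section */
		rcu_read_unlock();

		/* Report a quiescent state so synchronize_rcu() can progress. */
		rcu_quiescent_state();
	}
	rcu_unregister_thread();
	return NULL;
}
#endif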
void rcu_quiescent_state(void)
{
_rcu_quiescent_state();