*/
struct rcu_reader __thread *rcu_reader;
-static LIST_HEAD(registry);
+static CDS_LIST_HEAD(registry);
struct registry_arena {
void *p;
void update_counter_and_wait(void)
{
- LIST_HEAD(qsreaders);
+ CDS_LIST_HEAD(qsreaders);
int wait_loops = 0;
struct rcu_reader *index, *tmp;
/* Switch parity: 0 -> 1, 1 -> 0 */
- STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+ CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
/*
* Must commit qparity update to memory before waiting for other parity
* quiescent state. Failure to do so could result in the writer waiting
* forever while new readers are always accessing data (no progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
+ * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
*/
/*
*/
for (;;) {
wait_loops++;
- list_for_each_entry_safe(index, tmp, &registry, node) {
+ cds_list_for_each_entry_safe(index, tmp, &registry, node) {
if (!rcu_old_gp_ongoing(&index->ctr))
- list_move(&index->node, &qsreaders);
+ cds_list_move(&index->node, &qsreaders);
}
- if (list_empty(&registry)) {
+ if (cds_list_empty(&registry)) {
break;
} else {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
usleep(RCU_SLEEP_DELAY);
else
- cpu_relax();
+ caa_cpu_relax();
}
}
/* put back the reader list in the registry */
- list_splice(&qsreaders, &registry);
+ cds_list_splice(&qsreaders, &registry);
}
void synchronize_rcu(void)
mutex_lock(&rcu_gp_lock);
- if (list_empty(&registry))
+ if (cds_list_empty(&registry))
goto out;
/* All threads should read qparity before accessing data structure
/* Add to registry */
rcu_reader_reg->tid = pthread_self();
assert(rcu_reader_reg->ctr == 0);
- list_add(&rcu_reader_reg->node, &registry);
+ cds_list_add(&rcu_reader_reg->node, &registry);
rcu_reader = rcu_reader_reg;
}
ret = pthread_kill(tid, 0);
assert(ret != EINVAL);
if (ret == ESRCH) {
- list_del(&rcu_reader_reg->node);
+ cds_list_del(&rcu_reader_reg->node);
rcu_reader_reg->ctr = 0;
rcu_reader_reg->alloc = 0;
registry_arena.used -= sizeof(struct rcu_reader);