*
* Userspace RCU QSBR library
*
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- smp_rmb();
+ cmm_smp_rmb();
if (uatomic_read(&gp_futex) == -1)
futex_noasync(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
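
For reference, wait_gp() only blocks while gp_futex reads -1; the reader side is expected to clear the futex and issue a FUTEX_WAKE once it reports a quiescent state. A minimal sketch of that wake-up path, assuming a helper named wake_up_gp() and the same futex_noasync() wrapper used above:

static void wake_up_gp(void)
{
	/* Only wake the writer if it armed the futex (set it to -1). */
	if (uatomic_read(&gp_futex) == -1) {
		uatomic_set(&gp_futex, 0);
		/* Matches the FUTEX_WAIT in wait_gp() above. */
		futex_noasync(&gp_futex, FUTEX_WAKE, 1,
			      NULL, NULL, 0);
	}
}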
#if (BITS_PER_LONG < 64)
/* Switch parity: 0 -> 1, 1 -> 0 */
- STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+ CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
#else /* !(BITS_PER_LONG < 64) */
/* Increment current G.P. */
- STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+ CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
#endif /* !(BITS_PER_LONG < 64) */
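
The scan below decides whether a registered reader still holds up the grace period by comparing its per-thread counter against the rcu_gp_ctr value just stored. A sketch of that test, assuming a helper named rcu_gp_ongoing(): an offline reader (counter 0) never blocks the writer, and an online reader blocks it only until its counter catches up with the new rcu_gp_ctr (new parity on 32-bit, incremented value on 64-bit):

static inline int rcu_gp_ongoing(unsigned long *ctr)
{
	unsigned long v;

	v = CAA_LOAD_SHARED(*ctr);
	/* 0 means the reader is offline; equality means it has already
	 * observed the updated global counter. */
	return v && (v != rcu_gp_ctr);
}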
/*
* while new readers are always accessing data (no progress). Enforce
* compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
*/
- barrier();
+ cmm_barrier();
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
+ cmm_smp_mb();
/*
* Wait for each thread rcu_reader_qs_gp count to become 0.
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
- smp_mb();
+ cmm_smp_mb();
}
list_for_each_entry_safe(index, tmp, &registry, node) {
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb();
+ cmm_smp_mb();
uatomic_set(&gp_futex, 0);
}
break;
wait_gp();
} else {
#ifndef HAS_INCOHERENT_CACHES
- cpu_relax();
+ caa_cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
- smp_mb();
+ cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
}
}
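
The list_for_each_entry_safe() context line above is truncated; its body is not part of this hunk. Sketched under the assumption of a local qsreaders list head and the rcu_gp_ongoing() helper sketched earlier, it moves every reader that has already passed a quiescent state off the global registry, so only the stragglers are re-scanned:

	list_for_each_entry_safe(index, tmp, &registry, node) {
		if (!rcu_gp_ongoing(&index->ctr))
			/* Reader is offline or up to date: stop waiting on it. */
			list_move(&index->node, &qsreaders);
	}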
* where new ptr points to.
*/
/* Write new ptr before changing the qparity */
- smp_mb();
+ cmm_smp_mb();
/*
* Mark the writer thread offline to make sure we don't wait for
* threads registered as readers.
*/
if (was_online)
- STORE_SHARED(rcu_reader.ctr, 0);
+ CAA_STORE_SHARED(rcu_reader.ctr, 0);
mutex_lock(&rcu_gp_lock);
* accessing data (no progress). Enforce compiler-order of load
* rcu_reader ctr before store to rcu_gp_ctr.
*/
- barrier();
+ cmm_barrier();
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
+ cmm_smp_mb();
/*
* Wait for previous parity to be empty of readers.
* freed.
*/
if (was_online)
- _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
- smp_mb();
+ _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+ cmm_smp_mb();
}
#else /* !(BITS_PER_LONG < 64) */
void synchronize_rcu(void)
* our own quiescent state. This allows using synchronize_rcu() in
* threads registered as readers.
*/
- smp_mb();
+ cmm_smp_mb();
if (was_online)
- STORE_SHARED(rcu_reader.ctr, 0);
+ CAA_STORE_SHARED(rcu_reader.ctr, 0);
mutex_lock(&rcu_gp_lock);
if (list_empty(&registry))
mutex_unlock(&rcu_gp_lock);
if (was_online)
- _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
- smp_mb();
+ _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+ cmm_smp_mb();
}
#endif /* !(BITS_PER_LONG < 64) */
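
None of the renames above change the public API. A minimal caller-side sketch of the QSBR flavour, assuming the urcu-qsbr.h header and a single updater; struct data, shared_ptr, reader_loop() and update() are hypothetical names used only for illustration:

#include <stdlib.h>
#include <urcu-qsbr.h>

struct data { int value; };
static struct data *shared_ptr;		/* hypothetical RCU-protected pointer */

static void reader_loop(void)
{
	struct data *p;
	int i;

	rcu_register_thread();
	for (i = 0; i < 1000000; i++) {
		/* rcu_read_lock()/rcu_read_unlock() compile to nothing in the
		 * QSBR flavour, but keep them for flavour portability. */
		rcu_read_lock();
		p = rcu_dereference(shared_ptr);
		if (p)
			(void) p->value;
		rcu_read_unlock();
		rcu_quiescent_state();	/* announce a quiescent state */
	}
	rcu_unregister_thread();
}

static void update(struct data *new_data)
{
	struct data *old;

	old = shared_ptr;			/* single updater assumed */
	rcu_assign_pointer(shared_ptr, new_data);
	synchronize_rcu();		/* wait for all pre-existing readers */
	free(old);
}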