int wait_loops = 0;
struct rcu_reader *index, *tmp;
-#if (BITS_PER_LONG < 64)
+#if (CAA_BITS_PER_LONG < 64)
/* Switch parity: 0 -> 1, 1 -> 0 */
- CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
-#else /* !(BITS_PER_LONG < 64) */
+ CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+#else /* !(CAA_BITS_PER_LONG < 64) */
/* Increment current G.P. */
- CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
-#endif /* !(BITS_PER_LONG < 64) */
+ CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+#endif /* !(CAA_BITS_PER_LONG < 64) */
/*
* Must commit rcu_gp_ctr update to memory before waiting for quiescent
* long-size to ensure we do not encounter an overflow bug.
*/
-#if (BITS_PER_LONG < 64)
+#if (CAA_BITS_PER_LONG < 64)
void synchronize_rcu(void)
{
unsigned long was_online;
* threads registered as readers.
*/
if (was_online)
- CAA_STORE_SHARED(rcu_reader.ctr, 0);
+ CMM_STORE_SHARED(rcu_reader.ctr, 0);
mutex_lock(&rcu_gp_lock);
* freed.
*/
if (was_online)
- _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+ _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
cmm_smp_mb();
}
-#else /* !(BITS_PER_LONG < 64) */
+#else /* !(CAA_BITS_PER_LONG < 64) */
void synchronize_rcu(void)
{
unsigned long was_online;
*/
cmm_smp_mb();
if (was_online)
- CAA_STORE_SHARED(rcu_reader.ctr, 0);
+ CMM_STORE_SHARED(rcu_reader.ctr, 0);
mutex_lock(&rcu_gp_lock);
 	if (cds_list_empty(&registry))
mutex_unlock(&rcu_gp_lock);
if (was_online)
- _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+ _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
cmm_smp_mb();
}
-#endif /* !(BITS_PER_LONG < 64) */
+#endif /* !(CAA_BITS_PER_LONG < 64) */
/*
* library wrappers to be used by non-LGPL compatible source code.