LOAD_SHARED and STORE_SHARED should have CMM_ prefix
diff --git a/urcu-bp-static.h b/urcu-bp-static.h
index 2049ee6dd06311d133a1f6e805fe2ad39d616abe..14c6cfecac3af01a3bc4680c36381736f37a30f7 100644
--- a/urcu-bp-static.h
+++ b/urcu-bp-static.h
@@ -162,7 +162,7 @@ static inline int rcu_old_gp_ongoing(long *value)
         * Make sure both tests below are done on the same version of *value
         * to ensure consistency.
         */
-       v = CAA_LOAD_SHARED(*value);
+       v = CMM_LOAD_SHARED(*value);
        return (v & RCU_GP_CTR_NEST_MASK) &&
                 ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 }
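
For context on what the renamed accessors guarantee: as far as I know, the CMM_-prefixed
load/store helpers in liburcu are volatile, once-only accesses in the spirit of the Linux
kernel's ACCESS_ONCE(). The sketch below illustrates that idea under that assumption; it is
not the library's actual definition (the real macros live in urcu/system.h and also fold in
the compiler/coherency barriers the memory model calls for):

/* Simplified illustration only -- not liburcu's real macros. */
#define ILLUSTRATIVE_ACCESS_ONCE(x)      (*(volatile __typeof__(x) *)&(x))
#define ILLUSTRATIVE_LOAD_SHARED(p)      ILLUSTRATIVE_ACCESS_ONCE(p)
#define ILLUSTRATIVE_STORE_SHARED(x, v)  do { ILLUSTRATIVE_ACCESS_ONCE(x) = (v); } while (0)

Because v = CMM_LOAD_SHARED(*value) is one such single read, the nesting-mask test and the
phase test that follow it are both evaluated against the same snapshot of the reader's
counter, which is exactly what the comment above asks for.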
@@ -182,14 +182,14 @@ static inline void _rcu_read_lock(void)
         *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
         */
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-               _CAA_STORE_SHARED(rcu_reader->ctr, _CAA_LOAD_SHARED(rcu_gp_ctr));
+               _CMM_STORE_SHARED(rcu_reader->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
                /*
                 * Set active readers count for outermost nesting level before
                 * accessing the pointer.
                 */
                cmm_smp_mb();
        } else {
-               _CAA_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
+               _CMM_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
        }
 }
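
To place _rcu_read_lock()/_rcu_read_unlock() in context, here is a minimal reader-side sketch
against the public urcu-bp API. The header name urcu-bp.h, struct cfg and the shared_cfg
pointer are illustrative assumptions, not part of this patch; rcu_read_lock(),
rcu_read_unlock() and rcu_dereference() are the library calls that, in the bulletproof
flavour, end up in the static inlines shown here (directly or through a thin wrapper):

#include <stddef.h>
#include <urcu-bp.h>   /* bulletproof flavour: readers need no explicit registration */

struct cfg {                       /* hypothetical data published by a writer */
        int interval;
};

static struct cfg *shared_cfg;     /* hypothetical shared pointer */

static int read_interval(void)
{
        struct cfg *c;
        int v = -1;

        rcu_read_lock();                  /* outermost call: snapshot rcu_gp_ctr, then cmm_smp_mb() */
        c = rcu_dereference(shared_cfg);  /* pointer stays usable until the matching unlock */
        if (c)
                v = c->interval;
        rcu_read_unlock();                /* cmm_smp_mb(), then drop RCU_GP_COUNT from the nest count */
        return v;
}

Nesting is allowed: an inner rcu_read_lock() only adds RCU_GP_COUNT to rcu_reader->ctr, as the
else branch above shows, so the global counter is sampled once per outermost critical section.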
 
@@ -199,7 +199,7 @@ static inline void _rcu_read_unlock(void)
         * Finish using rcu before decrementing the pointer.
         */
        cmm_smp_mb();
-       _CAA_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
+       _CMM_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
        cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
 }
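
On the update side of these barriers, a companion sketch (reusing the hypothetical
struct cfg/shared_cfg names from the reader example, and assuming writers are serialized by an
external mutex): the writer publishes a new version with rcu_assign_pointer() and then calls
synchronize_rcu() to wait out pre-existing readers -- the grace-period detection that
rcu_old_gp_ongoing() above serves -- before freeing the old copy.

#include <stdlib.h>
#include <urcu-bp.h>

struct cfg {                       /* same illustrative type as in the reader sketch */
        int interval;
};

static struct cfg *shared_cfg;     /* same hypothetical shared pointer */

static void update_interval(int new_interval)   /* caller holds the (assumed) writer mutex */
{
        struct cfg *newc, *oldc;

        newc = malloc(sizeof(*newc));
        if (!newc)
                return;
        newc->interval = new_interval;

        oldc = shared_cfg;
        rcu_assign_pointer(shared_cfg, newc);   /* publish the new version to readers */
        synchronize_rcu();                      /* wait for all pre-existing read-side sections */
        free(oldc);                             /* no reader can still hold the old pointer */
}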
 