Rename all arch primitives with prefix caa_
[urcu.git] / urcu-qsbr.c
index 5e8b9603ae2f4c5f17efc6f8c54d9f1be9d9ccb0..51d34c7b34ea6bf64846b628885f584e4019e2db 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
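In short, every unprefixed barrier and shared-access primitive gains a namespace prefix: cmm_ for the memory-model macros and CAA_/caa_ for the architecture-abstraction ones. The calling pattern is otherwise unchanged; a minimal before/after sketch built from lines in the hunks below:

    /* Before this rename: */
    smp_mb();
    STORE_SHARED(rcu_reader.ctr, 0);

    /* After: */
    cmm_smp_mb();
    CAA_STORE_SHARED(rcu_reader.ctr, 0);
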
@@ -100,7 +100,7 @@ static void mutex_unlock(pthread_mutex_t *mutex)
 static void wait_gp(void)
 {
        /* Read reader_gp before read futex */
-       smp_rmb();
+       cmm_smp_rmb();
        if (uatomic_read(&gp_futex) == -1)
                futex_noasync(&gp_futex, FUTEX_WAIT, -1,
                      NULL, NULL, 0);
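
For reference, the cmm_-prefixed barriers typically resolve along these lines; this is a sketch modelled on liburcu's generic and x86 arch headers of this period, not code copied from this tree:

    /* Compiler-level barrier: constrains the compiler only. */
    #define cmm_barrier()   __asm__ __volatile__ ("" : : : "memory")

    /* x86 hardware fences. */
    #define cmm_mb()        __asm__ __volatile__ ("mfence" : : : "memory")
    #define cmm_rmb()       __asm__ __volatile__ ("lfence" : : : "memory")

    /* SMP variants degrade to a compiler barrier on UP builds. */
    #ifdef CONFIG_RCU_SMP
    #define cmm_smp_mb()    cmm_mb()
    #define cmm_smp_rmb()   cmm_rmb()
    #else
    #define cmm_smp_mb()    cmm_barrier()
    #define cmm_smp_rmb()   cmm_barrier()
    #endif
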
@@ -114,10 +114,10 @@ static void update_counter_and_wait(void)
 
 #if (BITS_PER_LONG < 64)
        /* Switch parity: 0 -> 1, 1 -> 0 */
-       STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+       CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
 #else  /* !(BITS_PER_LONG < 64) */
        /* Increment current G.P. */
-       STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+       CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
 #endif /* !(BITS_PER_LONG < 64) */
 
        /*
@@ -126,14 +126,14 @@ static void update_counter_and_wait(void)
         * while new readers are always accessing data (no progress). Enforce
         * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
         */
-       barrier();
+       cmm_barrier();
 
        /*
-        * Adding a smp_mb() which is _not_ formally required, but makes the
+        * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
         * model easier to understand. It does not have a big performance impact
         * anyway, given this is the write-side.
         */
-       smp_mb();
+       cmm_smp_mb();
 
        /*
         * Wait for each thread rcu_reader_qs_gp count to become 0.
@@ -143,7 +143,7 @@ static void update_counter_and_wait(void)
                if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
                        uatomic_dec(&gp_futex);
                        /* Write futex before read reader_gp */
-                       smp_mb();
+                       cmm_smp_mb();
                }
 
                list_for_each_entry_safe(index, tmp, &registry, node) {
@@ -154,7 +154,7 @@ static void update_counter_and_wait(void)
                if (list_empty(&registry)) {
                        if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
                                /* Read reader_gp before write futex */
-                               smp_mb();
+                               cmm_smp_mb();
                                uatomic_set(&gp_futex, 0);
                        }
                        break;
@@ -163,9 +163,9 @@ static void update_counter_and_wait(void)
                                wait_gp();
                        } else {
 #ifndef HAS_INCOHERENT_CACHES
-                               cpu_relax();
+                               caa_cpu_relax();
 #else /* #ifndef HAS_INCOHERENT_CACHES */
-                               smp_mb();
+                               cmm_smp_mb();
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
                        }
                }
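
The non-futex spin arm above uses caa_cpu_relax(); on x86 this is conventionally the PAUSE hint. A sketch of the usual definition (assumed, not taken from this tree):

    /* "rep; nop" encodes PAUSE: throttles the spinning core and
     * plays nicely with SMT siblings while we poll reader state. */
    #define caa_cpu_relax() __asm__ __volatile__ ("rep; nop" : : : "memory")
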
@@ -190,7 +190,7 @@ void synchronize_rcu(void)
         * where new ptr points to.
         */
        /* Write new ptr before changing the qparity */
-       smp_mb();
+       cmm_smp_mb();
 
        /*
         * Mark the writer thread offline to make sure we don't wait for
@@ -198,7 +198,7 @@ void synchronize_rcu(void)
         * threads registered as readers.
         */
        if (was_online)
-               STORE_SHARED(rcu_reader.ctr, 0);
+               CAA_STORE_SHARED(rcu_reader.ctr, 0);
 
        mutex_lock(&rcu_gp_lock);
 
@@ -217,14 +217,14 @@ void synchronize_rcu(void)
         * accessing data (no progress).  Enforce compiler-order of load
         * rcu_reader ctr before store to rcu_gp_ctr.
         */
-       barrier();
+       cmm_barrier();
 
        /*
-        * Adding a smp_mb() which is _not_ formally required, but makes the
+        * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
         * model easier to understand. It does not have a big performance impact
         * anyway, given this is the write-side.
         */
-       smp_mb();
+       cmm_smp_mb();
 
        /*
         * Wait for previous parity to be empty of readers.
@@ -238,8 +238,8 @@ out:
         * freed.
         */
        if (was_online)
-               _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
-       smp_mb();
+               _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+       cmm_smp_mb();
 }
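
The CAA_LOAD_SHARED()/CAA_STORE_SHARED() pair used above wraps volatile accesses, plus cache-coherency barriers that matter only on targets declaring HAS_INCOHERENT_CACHES. A sketch assuming the usual ACCESS_ONCE-style definitions (the cmm_smp_rmc()/cmm_smp_wmc() names are assumptions based on liburcu convention):

    #define CAA_ACCESS_ONCE(x)  (*(volatile __typeof__(x) *)&(x))

    /* Underscored variants: raw volatile access, no coherency barrier. */
    #define _CAA_LOAD_SHARED(p)      CAA_ACCESS_ONCE(p)
    #define _CAA_STORE_SHARED(x, v)  ({ CAA_ACCESS_ONCE(x) = (v); })

    /* Full variants add read/write coherency barriers, which expand
     * to nothing on coherent-cache targets. */
    #define CAA_LOAD_SHARED(p) \
            ({ cmm_smp_rmc(); _CAA_LOAD_SHARED(p); })
    #define CAA_STORE_SHARED(x, v) \
            ({ __typeof__(x) _v = _CAA_STORE_SHARED(x, v); cmm_smp_wmc(); _v; })
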
 #else /* !(BITS_PER_LONG < 64) */
 void synchronize_rcu(void)
@@ -253,9 +253,9 @@ void synchronize_rcu(void)
         * our own quiescent state. This allows using synchronize_rcu() in
         * threads registered as readers.
         */
-       smp_mb();
+       cmm_smp_mb();
        if (was_online)
-               STORE_SHARED(rcu_reader.ctr, 0);
+               CAA_STORE_SHARED(rcu_reader.ctr, 0);
 
        mutex_lock(&rcu_gp_lock);
        if (list_empty(&registry))
@@ -265,8 +265,8 @@ out:
        mutex_unlock(&rcu_gp_lock);
 
        if (was_online)
-               _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
-       smp_mb();
+               _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+       cmm_smp_mb();
 }
 #endif  /* !(BITS_PER_LONG < 64) */
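
The was_online dance in both synchronize_rcu() variants mirrors the QSBR reader-side offline/online helpers. Hypothetical, simplified forms, distilled from the stores in the hunks above rather than from this tree's actual helpers:

    static inline void rcu_thread_offline_sketch(void)
    {
            cmm_smp_mb();   /* order prior accesses before going offline */
            CAA_STORE_SHARED(rcu_reader.ctr, 0);    /* writer skips this thread */
    }

    static inline void rcu_thread_online_sketch(void)
    {
            /* Re-join the current grace period before touching RCU data. */
            _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
            cmm_smp_mb();
    }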
 