Rename all arch primitives with prefix caa_
[urcu.git] / urcu-defer.c
index 92635ed3cca4c1f45390bfac5348714722f56547..c28e8488354e4138f24114200258eb3abdf7f3d3 100644 (file)
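This commit applies the liburcu naming cleanup to urcu-defer.c: the memory-barrier primitives are renamed with a cmm_ prefix and the shared-memory access macros with a caa_/CAA_ prefix. As rough orientation only, the sketch below approximates what the renamed primitives mean; the authoritative definitions live in urcu/system.h, urcu/compiler.h and the urcu/arch_*.h headers, and the real expansions there are more involved (per-architecture barriers, cache-coherency handling).

/* Illustrative approximations only -- not the actual liburcu definitions. */
#define CAA_ACCESS_ONCE(x)        (*(__volatile__ __typeof__(x) *)&(x))

/* Plain load/store of a shared location, never torn or cached in a
 * register by the compiler.  The underscore-prefixed variants used in
 * the hunks below are the same accesses minus the extra coherency
 * barrier that the non-underscore variants add for non-cache-coherent
 * architectures. */
#define _CAA_LOAD_SHARED(p)       CAA_ACCESS_ONCE(p)
#define _CAA_STORE_SHARED(x, v)   ({ CAA_ACCESS_ONCE(x) = (v); })

/* SMP ordering barriers; cmm_smp_rmb()/cmm_smp_wmb() are the read-only
 * and write-only counterparts of the full barrier. */
#define cmm_smp_mb()              __sync_synchronize()  /* approximation */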
@@ -3,7 +3,7 @@
  *
  * Userspace RCU library - batch memory reclamation
  *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -110,7 +110,7 @@ static unsigned long rcu_defer_num_callbacks(void)
 
        mutex_lock(&rcu_defer_mutex);
        list_for_each_entry(index, &registry, list) {
-               head = LOAD_SHARED(index->head);
+               head = CAA_LOAD_SHARED(index->head);
                num_items += head - index->tail;
        }
        mutex_unlock(&rcu_defer_mutex);
@@ -123,13 +123,13 @@ static unsigned long rcu_defer_num_callbacks(void)
 static void wait_defer(void)
 {
        uatomic_dec(&defer_thread_futex);
-       smp_mb();       /* Write futex before read queue */
+       cmm_smp_mb();   /* Write futex before read queue */
        if (rcu_defer_num_callbacks()) {
-               smp_mb();       /* Read queue before write futex */
+               cmm_smp_mb();   /* Read queue before write futex */
                /* Callbacks are queued, don't wait. */
                uatomic_set(&defer_thread_futex, 0);
        } else {
-               smp_rmb();      /* Read queue before read futex */
+               cmm_smp_rmb();  /* Read queue before read futex */
                if (uatomic_read(&defer_thread_futex) == -1)
                        futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1,
                              NULL, NULL, 0);
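The wait_defer() path above pairs with a wake-up on the enqueue side, which is outside this hunk. For context, a hedged reconstruction of that waker, following the standard futex protocol; the exact liburcu code may differ:

/* Hedged reconstruction of the waker paired with wait_defer() above;
 * not part of this diff.  -1 means "defer thread intends to sleep". */
static void wake_up_defer_sketch(void)
{
	if (unlikely(uatomic_read(&defer_thread_futex) == -1)) {
		uatomic_set(&defer_thread_futex, 0);        /* cancel the sleep... */
		futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
			      NULL, NULL, 0);               /* ...and wake one waiter */
	}
}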
@@ -152,22 +152,22 @@ static void rcu_defer_barrier_queue(struct defer_queue *queue,
         */
 
        for (i = queue->tail; i != head;) {
-               smp_rmb();       /* read head before q[]. */
-               p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+               cmm_smp_rmb();       /* read head before q[]. */
+               p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                if (unlikely(DQ_IS_FCT_BIT(p))) {
                        DQ_CLEAR_FCT_BIT(p);
                        queue->last_fct_out = p;
-                       p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                } else if (unlikely(p == DQ_FCT_MARK)) {
-                       p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                        queue->last_fct_out = p;
-                       p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                }
                fct = queue->last_fct_out;
                fct(p);
        }
-       smp_mb();       /* push tail after having used q[] */
-       STORE_SHARED(queue->tail, i);
+       cmm_smp_mb();   /* push tail after having used q[] */
+       CAA_STORE_SHARED(queue->tail, i);
 }
 
 static void _rcu_defer_barrier_thread(void)
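The converted drain loop above is the consumer half of a single-producer/single-consumer ring: snapshot head, read-barrier before reading slots, then a full barrier before publishing the new tail so slots are not recycled while still in use. A minimal sketch of that ordering, reusing the queue field names from the diff but omitting the function-pointer encoding (DQ_FCT_MARK / FCT bit):

/* Minimal sketch of the consumer-side ordering; illustrative only. */
static void drain_sketch(struct defer_queue *queue, void (*consume)(void *))
{
	unsigned long i, head;
	void *p;

	head = CAA_LOAD_SHARED(queue->head);          /* snapshot producer index */
	for (i = queue->tail; i != head;) {
		cmm_smp_rmb();                        /* read head before q[] contents */
		p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		consume(p);                           /* run the deferred callback */
	}
	cmm_smp_mb();                                 /* finish reading q[] before... */
	CAA_STORE_SHARED(queue->tail, i);             /* ...letting the producer reuse slots */
}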
@@ -212,7 +212,7 @@ void rcu_defer_barrier(void)
 
        mutex_lock(&rcu_defer_mutex);
        list_for_each_entry(index, &registry, list) {
-               index->last_head = LOAD_SHARED(index->head);
+               index->last_head = CAA_LOAD_SHARED(index->head);
                num_items += index->last_head - index->tail;
        }
        if (likely(!num_items)) {
@@ -241,7 +241,7 @@ void _defer_rcu(void (*fct)(void *p), void *p)
         * thread.
         */
        head = defer_queue.head;
-       tail = LOAD_SHARED(defer_queue.tail);
+       tail = CAA_LOAD_SHARED(defer_queue.tail);
 
        /*
         * If queue is full, or reached threshold. Empty queue ourself.
@@ -250,7 +250,7 @@ void _defer_rcu(void (*fct)(void *p), void *p)
        if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
                assert(head - tail <= DEFER_QUEUE_SIZE);
                rcu_defer_barrier_thread();
-               assert(head - LOAD_SHARED(defer_queue.tail) == 0);
+               assert(head - CAA_LOAD_SHARED(defer_queue.tail) == 0);
        }
 
        if (unlikely(defer_queue.last_fct_in != fct)) {
@@ -261,13 +261,13 @@ void _defer_rcu(void (*fct)(void *p), void *p)
                         * marker, write DQ_FCT_MARK followed by the function
                         * pointer.
                         */
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      DQ_FCT_MARK);
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                } else {
                        DQ_SET_FCT_BIT(fct);
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                }
        } else {
@@ -276,17 +276,17 @@ void _defer_rcu(void (*fct)(void *p), void *p)
                         * If the data to encode is not aligned or the marker,
                         * write DQ_FCT_MARK followed by the function pointer.
                         */
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      DQ_FCT_MARK);
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                }
        }
-       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
-       smp_wmb();      /* Publish new pointer before head */
+       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
+       cmm_smp_wmb();  /* Publish new pointer before head */
                        /* Write q[] before head. */
-       STORE_SHARED(defer_queue.head, head);
-       smp_mb();       /* Write queue head before read futex */
+       CAA_STORE_SHARED(defer_queue.head, head);
+       cmm_smp_mb();   /* Write queue head before read futex */
        /*
         * Wake-up any waiting defer thread.
         */
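Taken together, the enqueue path above (slot stores, cmm_smp_wmb(), head publication, cmm_smp_mb(), futex wake) is what backs the public defer_rcu() call. A hedged usage sketch, assuming the defer API names of this liburcu era (defer_rcu(), rcu_defer_register_thread(), rcu_defer_unregister_thread()); see urcu-defer.h for the exact prototypes:

/* Hedged usage sketch; error handling omitted. */
#include <stdlib.h>
#include <urcu.h>
#include <urcu-defer.h>

struct node { int key; };

static void remove_node(struct node *n)
{
	/* Unlink n from the RCU-protected structure first (not shown),
	 * then defer the free until a grace period has elapsed. */
	defer_rcu(free, n);
}

static void worker(void)
{
	rcu_register_thread();
	rcu_defer_register_thread();   /* allocates this thread's defer queue */
	/* ... read-side and update-side work, e.g. remove_node() ... */
	rcu_defer_unregister_thread();
	rcu_unregister_thread();
}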