Rename all arch primitives with prefix caa_
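
The memory-barrier primitives gain a cmm_ prefix and the shared
load/store macros a CAA_ prefix, so they no longer pollute the
application's namespace. Besides these mechanical renames, the diff
below also renames the urcu_defer_* symbols to rcu_defer_* and drops
the rate-limiter variant of defer_rcu(). For reference, a hypothetical
backward-compatibility shim (not part of this patch, which renames the
call sites instead) would map the old names as follows:

    /* Hypothetical compat shim; NOT part of this patch. */
    #define smp_mb()             cmm_smp_mb()
    #define smp_rmb()            cmm_smp_rmb()
    #define smp_wmb()            cmm_smp_wmb()
    #define LOAD_SHARED(x)       CAA_LOAD_SHARED(x)
    #define STORE_SHARED(x, v)   CAA_STORE_SHARED(x, v)
    #define _STORE_SHARED(x, v)  _CAA_STORE_SHARED(x, v)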
diff --git a/urcu-defer.c b/urcu-defer.c
index 3cc1a0c0b32b9132673b47a361ea116f9a8bf338..c28e8488354e4138f24114200258eb3abdf7f3d3 100644
--- a/urcu-defer.c
+++ b/urcu-defer.c
@@ -3,7 +3,7 @@
  *
  * Userspace RCU library - batch memory reclamation
  *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
 #include "urcu-defer.h"
 
-void __attribute__((destructor)) urcu_defer_exit(void);
+void __attribute__((destructor)) rcu_defer_exit(void);
 
 extern void synchronize_rcu(void);
 
 /*
- * urcu_defer_mutex nests inside defer_thread_mutex.
+ * rcu_defer_mutex nests inside defer_thread_mutex.
  */
-static pthread_mutex_t urcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t rcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static int defer_thread_futex;
@@ -57,7 +57,7 @@ static struct defer_queue __thread defer_queue;
 static LIST_HEAD(registry);
 static pthread_t tid_defer;
 
-static void internal_urcu_lock(pthread_mutex_t *mutex)
+static void mutex_lock(pthread_mutex_t *mutex)
 {
        int ret;
 
@@ -80,7 +80,7 @@ static void internal_urcu_lock(pthread_mutex_t *mutex)
 #endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
 }
 
-static void internal_urcu_unlock(pthread_mutex_t *mutex)
+static void mutex_unlock(pthread_mutex_t *mutex)
 {
        int ret;
 
@@ -108,12 +108,12 @@ static unsigned long rcu_defer_num_callbacks(void)
        unsigned long num_items = 0, head;
        struct defer_queue *index;
 
-       internal_urcu_lock(&urcu_defer_mutex);
+       mutex_lock(&rcu_defer_mutex);
        list_for_each_entry(index, &registry, list) {
-               head = LOAD_SHARED(index->head);
+               head = CAA_LOAD_SHARED(index->head);
                num_items += head - index->tail;
        }
-       internal_urcu_unlock(&urcu_defer_mutex);
+       mutex_unlock(&rcu_defer_mutex);
        return num_items;
 }
 
@@ -123,13 +123,13 @@ static unsigned long rcu_defer_num_callbacks(void)
 static void wait_defer(void)
 {
        uatomic_dec(&defer_thread_futex);
-       smp_mb();       /* Write futex before read queue */
+       cmm_smp_mb();   /* Write futex before read queue */
        if (rcu_defer_num_callbacks()) {
-               smp_mb();       /* Read queue before write futex */
+               cmm_smp_mb();   /* Read queue before write futex */
                /* Callbacks are queued, don't wait. */
                uatomic_set(&defer_thread_futex, 0);
        } else {
-               smp_rmb();      /* Read queue before read futex */
+               cmm_smp_rmb();  /* Read queue before read futex */
                if (uatomic_read(&defer_thread_futex) == -1)
                        futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1,
                              NULL, NULL, 0);
@@ -152,22 +152,22 @@ static void rcu_defer_barrier_queue(struct defer_queue *queue,
         */
 
        for (i = queue->tail; i != head;) {
-               smp_rmb();       /* read head before q[]. */
-               p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+               cmm_smp_rmb();       /* read head before q[]. */
+               p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                if (unlikely(DQ_IS_FCT_BIT(p))) {
                        DQ_CLEAR_FCT_BIT(p);
                        queue->last_fct_out = p;
-                       p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                } else if (unlikely(p == DQ_FCT_MARK)) {
-                       p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                        queue->last_fct_out = p;
-                       p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                }
                fct = queue->last_fct_out;
                fct(p);
        }
-       smp_mb();       /* push tail after having used q[] */
-       STORE_SHARED(queue->tail, i);
+       cmm_smp_mb();   /* push tail after having used q[] */
+       CAA_STORE_SHARED(queue->tail, i);
 }
 
 static void _rcu_defer_barrier_thread(void)
@@ -184,9 +184,9 @@ static void _rcu_defer_barrier_thread(void)
 
 void rcu_defer_barrier_thread(void)
 {
-       internal_urcu_lock(&urcu_defer_mutex);
+       mutex_lock(&rcu_defer_mutex);
        _rcu_defer_barrier_thread();
-       internal_urcu_unlock(&urcu_defer_mutex);
+       mutex_unlock(&rcu_defer_mutex);
 }
 
 /*
@@ -210,9 +210,9 @@ void rcu_defer_barrier(void)
        if (list_empty(&registry))
                return;
 
-       internal_urcu_lock(&urcu_defer_mutex);
+       mutex_lock(&rcu_defer_mutex);
        list_for_each_entry(index, &registry, list) {
-               index->last_head = LOAD_SHARED(index->head);
+               index->last_head = CAA_LOAD_SHARED(index->head);
                num_items += index->last_head - index->tail;
        }
        if (likely(!num_items)) {
@@ -226,37 +226,31 @@ void rcu_defer_barrier(void)
        list_for_each_entry(index, &registry, list)
                rcu_defer_barrier_queue(index, index->last_head);
 end:
-       internal_urcu_unlock(&urcu_defer_mutex);
+       mutex_unlock(&rcu_defer_mutex);
 }
 
 /*
  * _defer_rcu - Queue an RCU callback.
  */
-void _defer_rcu_ratelimit(void (*fct)(void *p), void *p, int (*rl)(void *p))
+void _defer_rcu(void (*fct)(void *p), void *p)
 {
        unsigned long head, tail;
-       int sync;
-
-       /*
-        * Verify if we reached the rate limiter threshold.
-        */
-       sync = rl ? rl(p) : 0;
 
        /*
         * Head is only modified by us. Tail can be modified by the
         * reclamation thread.
         */
        head = defer_queue.head;
-       tail = LOAD_SHARED(defer_queue.tail);
+       tail = CAA_LOAD_SHARED(defer_queue.tail);
 
        /*
         * If the queue is full, empty it ourself.
         * Worst-case: must allow 2 supplementary entries for the fct pointer.
         */
-       if (unlikely(sync || (head - tail >= DEFER_QUEUE_SIZE - 2))) {
+       if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
                assert(head - tail <= DEFER_QUEUE_SIZE);
                rcu_defer_barrier_thread();
-               assert(head - LOAD_SHARED(defer_queue.tail) == 0);
+               assert(head - CAA_LOAD_SHARED(defer_queue.tail) == 0);
        }
 
        if (unlikely(defer_queue.last_fct_in != fct)) {
@@ -267,13 +261,13 @@ void _defer_rcu_ratelimit(void (*fct)(void *p), void *p, int (*rl)(void *p))
                         * marker, write DQ_FCT_MARK followed by the function
                         * pointer.
                         */
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      DQ_FCT_MARK);
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                } else {
                        DQ_SET_FCT_BIT(fct);
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                }
        } else {
@@ -282,17 +276,17 @@ void _defer_rcu_ratelimit(void (*fct)(void *p), void *p, int (*rl)(void *p))
                        * If the data to encode is not aligned, or is the
                        * marker value, write DQ_FCT_MARK followed by the
                        * function pointer.
                         */
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      DQ_FCT_MARK);
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                }
        }
-       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
-       smp_wmb();      /* Publish new pointer before head */
+       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
+       cmm_smp_wmb();  /* Publish new pointer before head */
                        /* Write q[] before head. */
-       STORE_SHARED(defer_queue.head, head);
-       smp_mb();       /* Write queue head before read futex */
+       CAA_STORE_SHARED(defer_queue.head, head);
+       cmm_smp_mb();   /* Write queue head before read futex */
        /*
         * Wake-up any waiting defer thread.
         */
@@ -321,9 +315,9 @@ void *thr_defer(void *args)
  * library wrappers to be used by non-LGPL compatible source code.
  */
 
-void defer_rcu_ratelimit(void (*fct)(void *p), void *p, int (*rl)(void *p))
+void defer_rcu(void (*fct)(void *p), void *p)
 {
-       _defer_rcu_ratelimit(fct, p, rl);
+       _defer_rcu(fct, p);
 }
 
 static void start_defer_thread(void)
@@ -353,36 +347,36 @@ void rcu_defer_register_thread(void)
        assert(defer_queue.q == NULL);
        defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
 
-       internal_urcu_lock(&defer_thread_mutex);
-       internal_urcu_lock(&urcu_defer_mutex);
+       mutex_lock(&defer_thread_mutex);
+       mutex_lock(&rcu_defer_mutex);
        was_empty = list_empty(&registry);
        list_add(&defer_queue.list, &registry);
-       internal_urcu_unlock(&urcu_defer_mutex);
+       mutex_unlock(&rcu_defer_mutex);
 
        if (was_empty)
                start_defer_thread();
-       internal_urcu_unlock(&defer_thread_mutex);
+       mutex_unlock(&defer_thread_mutex);
 }
 
 void rcu_defer_unregister_thread(void)
 {
        int is_empty;
 
-       internal_urcu_lock(&defer_thread_mutex);
-       internal_urcu_lock(&urcu_defer_mutex);
+       mutex_lock(&defer_thread_mutex);
+       mutex_lock(&rcu_defer_mutex);
        list_del(&defer_queue.list);
        _rcu_defer_barrier_thread();
        free(defer_queue.q);
        defer_queue.q = NULL;
        is_empty = list_empty(&registry);
-       internal_urcu_unlock(&urcu_defer_mutex);
+       mutex_unlock(&rcu_defer_mutex);
 
        if (is_empty)
                stop_defer_thread();
-       internal_urcu_unlock(&defer_thread_mutex);
+       mutex_unlock(&defer_thread_mutex);
 }
 
-void urcu_defer_exit(void)
+void rcu_defer_exit(void)
 {
        assert(list_empty(&registry));
 }
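
A minimal usage sketch of the simplified API after this change (the
rate-limiter callback argument is gone). Only defer_rcu() and the
rcu_defer_{register,unregister}_thread() calls come from this file;
the surrounding names are illustrative, and the writer is assumed to
be serialized by some external means:

    #include <stdlib.h>
    #include <urcu.h>
    #include <urcu-defer.h>

    struct foo { int x; };

    static void replace_and_reclaim(struct foo **slot, struct foo *newp)
    {
            struct foo *old;

            rcu_defer_register_thread();     /* allocate this thread's defer queue */
            old = *slot;                     /* writer-side read, externally serialized */
            rcu_assign_pointer(*slot, newp); /* publish the replacement */
            defer_rcu(free, old);            /* free(old) runs after a grace period */
            rcu_defer_unregister_thread();   /* flushes the queue before teardown */
    }

In practice a thread registers once at startup rather than around each
call; unregistering flushes the thread's queue, and rcu_defer_barrier()
can be used to flush all registered threads' queues explicitly.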