[urcu.git] / urcu / rculfqueue.h
index 76da6b16da648eadef645337d69d49d4e8a22543..01c2092610350155247b87cadd6149aa3b26b4b1 100644 (file)
@@ -1,3 +1,6 @@
+#ifndef _URCU_RCULFQUEUE_H
+#define _URCU_RCULFQUEUE_H
+
 /*
  * rculfqueue.h
  *
 #include <urcu/urcu_ref.h>
 #include <assert.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #if (!defined(_GNU_SOURCE) && !defined(_LGPL_SOURCE))
 #error "Dynamic loader LGPL wrappers not implemented yet"
 #endif
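
The new include guard and extern "C" wrapper make the header safe to include more than once and from C++ translation units, while the #error above still requires either _GNU_SOURCE or _LGPL_SOURCE, since dynamic-loader LGPL wrappers are not implemented. A minimal includer sketch, assuming an LGPL-compatible caller and the usual liburcu headers:

/* Must be defined before any urcu header, otherwise the #error above fires. */
#define _LGPL_SOURCE
#include <urcu.h>            /* rcu_register_thread(), rcu_read_lock() */
#include <urcu/rculfqueue.h> /* struct rcu_lfq_queue, rcu_lfq_enqueue() */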
@@ -72,11 +79,11 @@ void rcu_lfq_enqueue(struct rcu_lfq_queue *q, struct rcu_lfq_node *node)
         * node before publication.
         */
 
-       rcu_read_lock();
        for (;;) {
-               struct rcu_lfq_node *tail = rcu_dereference(q->tail);
-               struct rcu_lfq_node *next;
+               struct rcu_lfq_node *tail, *next;
 
+               rcu_read_lock();
+               tail = rcu_dereference(q->tail);
                /*
                 * Typically expect tail->next to be NULL.
                 */
@@ -97,6 +104,7 @@ void rcu_lfq_enqueue(struct rcu_lfq_queue *q, struct rcu_lfq_node *node)
                         * further and retry.
                         */
                        uatomic_cmpxchg(&q->tail, tail, next);
+                       rcu_read_unlock();
                        continue;
                }
        }
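
With this change the enqueue retry loop opens and closes its own RCU read-side critical section on every iteration instead of holding one across the whole loop, so a thread that repeatedly loses the tail cmpxchg race no longer stretches a single read-side critical section across all of its retries. A caller-side sketch, under stated assumptions: rcu_lfq_init()/rcu_lfq_node_init() are taken to be the header's initializers, and struct myitem and enqueue_item() are purely illustrative.

#define _LGPL_SOURCE
#include <stdlib.h>
#include <urcu.h>
#include <urcu/rculfqueue.h>

struct myitem {
        struct rcu_lfq_node node;       /* embedded queue linkage + urcu_ref */
        int value;
};

static struct rcu_lfq_queue q;          /* assume rcu_lfq_init(&q) ran once */

static void enqueue_item(int value)
{
        struct myitem *item = malloc(sizeof(*item));

        if (!item)
                abort();
        rcu_lfq_node_init(&item->node); /* assumed per-node initializer */
        item->value = value;
        /*
         * The caller only needs to be a registered RCU reader thread
         * (rcu_register_thread()); enqueue now takes and releases the
         * read-side lock around each retry itself.
         */
        rcu_lfq_enqueue(&q, &item->node);
}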
@@ -107,17 +115,19 @@ void rcu_lfq_enqueue(struct rcu_lfq_queue *q, struct rcu_lfq_node *node)
  * which calls the release primitive when the reference count drops to zero. A
  * grace period must be waited before performing the actual memory reclamation
  * in the release primitive.
- * The entry lfq node returned by dequeue must no be re-used before the
- * reference count reaches zero.
+ * The entry lfq node returned by dequeue must not be modified/re-used/freed
+ * until the reference count reaches zero and a grace period has elapsed (after
+ * the refcount reached 0).
  */
 struct rcu_lfq_node *
 rcu_lfq_dequeue(struct rcu_lfq_queue *q, void (*release)(struct urcu_ref *))
 {
-       rcu_read_lock();
        for (;;) {
-               struct rcu_lfq_node *head = rcu_dereference(q->head);
-               struct rcu_lfq_node *next = rcu_dereference(head->next);
+               struct rcu_lfq_node *head, *next;
 
+               rcu_read_lock();
+               head = rcu_dereference(q->head);
+               next = rcu_dereference(head->next);
                if (next) {
                        if (uatomic_cmpxchg(&q->head, head, next) == head) {
                                rcu_read_unlock();
@@ -125,6 +135,7 @@ rcu_lfq_dequeue(struct rcu_lfq_queue *q, void (*release)(struct urcu_ref *))
                                return next;
                        } else {
                                /* Concurrently pushed, retry */
+                               rcu_read_unlock();
                                continue;
                        }
                } else {
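
The dequeue contract described in the comment above, sketched from the caller's side: the returned node carries a reference that must be dropped with urcu_ref_put(), and the release primitive must defer the actual free by a grace period. This assumes struct rcu_lfq_node embeds its struct urcu_ref in a member named ref and that liburcu's call_rcu() service is available; myitem_rcu, item_of(), free_item_rcu() and dequeue_item() are illustrative, not part of the API, and handling of the queue's initial dummy node (if it is not heap-allocated like the items here) is omitted for brevity.

#define _LGPL_SOURCE
#include <stddef.h>
#include <stdlib.h>
#include <urcu.h>
#include <urcu-call-rcu.h>      /* assumed location of call_rcu() in this era */
#include <urcu/rculfqueue.h>

/* container_of-style helper, spelled out to avoid relying on a macro name. */
#define item_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

struct myitem_rcu {
        struct rcu_lfq_node node;       /* queue linkage + refcount */
        struct rcu_head rcu_head;       /* used to defer reclamation */
        int value;
};

static void free_item_rcu(struct rcu_head *head)
{
        free(item_of(head, struct myitem_rcu, rcu_head));
}

/*
 * Called once a node's refcount drops to zero. Per the comment above, the
 * actual reclamation is deferred by a grace period, here via call_rcu().
 */
static void release_item(struct urcu_ref *ref)
{
        struct rcu_lfq_node *node = item_of(ref, struct rcu_lfq_node, ref);
        struct myitem_rcu *item = item_of(node, struct myitem_rcu, node);

        call_rcu(&item->rcu_head, free_item_rcu);
}

static int dequeue_item(struct rcu_lfq_queue *queue, int *value)
{
        struct rcu_lfq_node *node = rcu_lfq_dequeue(queue, release_item);
        struct myitem_rcu *item;

        if (!node)
                return -1;              /* queue empty */
        item = item_of(node, struct myitem_rcu, node);
        *value = item->value;
        /*
         * Drop the reference dequeue handed back; release_item() only runs
         * once the count reaches zero, and the memory is only reclaimed
         * after a further grace period.
         */
        urcu_ref_put(&node->ref, release_item);
        return 0;
}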
@@ -134,3 +145,9 @@ rcu_lfq_dequeue(struct rcu_lfq_queue *q, void (*release)(struct urcu_ref *))
                }
        }
 }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_RCULFQUEUE_H */
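
Tying the pieces together, a minimal end-to-end sketch under the same assumptions as the snippets above (rcu_lfq_init() as the queue initializer, a call_rcu helper available, and the illustrative enqueue_item()/dequeue_item() helpers and static queue q):

#include <stdio.h>

int main(void)
{
        int v;

        rcu_register_thread();          /* every thread using the queue */
        rcu_lfq_init(&q);               /* assumed queue initializer */

        enqueue_item(1);
        enqueue_item(2);
        while (!dequeue_item(&q, &v))
                printf("dequeued %d\n", v);

        rcu_unregister_thread();
        return 0;
}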