#include <urcu-call-rcu.h>
#include <urcu/uatomic.h>
+#include <urcu-pointer.h>
#include <assert.h>
#include <errno.h>
* grace period.
*/
-static inline
-int is_dummy(struct cds_lfq_queue_rcu *q, struct cds_lfq_node_rcu *node)
-{
- return node == q->dummy;
-}
-
static inline
struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
struct cds_lfq_node_rcu *next)
dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy));
assert(dummy);
dummy->parent.next = next;
+ dummy->parent.dummy = 1;
dummy->q = q;
return &dummy->parent;
}
static inline
-void free_dummy(struct rcu_head *head)
+void free_dummy_cb(struct rcu_head *head)
{
struct cds_lfq_node_rcu_dummy *dummy =
caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
{
struct cds_lfq_node_rcu_dummy *dummy;
+ assert(node->dummy);
+ dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
+ dummy->q->queue_call_rcu(&dummy->head, free_dummy_cb);
+}
+
+static inline
+void free_dummy(struct cds_lfq_node_rcu *node)
+{
+ struct cds_lfq_node_rcu_dummy *dummy;
+
+ assert(node->dummy);
dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
- dummy->q->queue_call_rcu(&dummy->head, free_dummy);
+ free(dummy);
}
static inline
void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
{
node->next = NULL;
+ node->dummy = 0;
}
static inline
void (*func)(struct rcu_head *head)))
{
q->tail = make_dummy(q, NULL);
- q->dummy = q->tail;
q->head = q->tail;
q->queue_call_rcu = queue_call_rcu;
}
struct cds_lfq_node_rcu *head;
head = rcu_dereference(q->head);
- if (!(is_dummy(q, head) && head->next == NULL))
+ if (!(head->dummy && head->next == NULL))
return -EPERM; /* not empty */
- rcu_free_dummy(head);
+ free_dummy(head);
return 0;
}
}
}
+static inline
+void enqueue_dummy(struct cds_lfq_queue_rcu *q)
+{
+ struct cds_lfq_node_rcu *node;
+
+	/* Reallocate a fresh dummy node to guard against the ABA problem. */
+ node = make_dummy(q, NULL);
+ _cds_lfq_enqueue_rcu(q, node);
+}
+
/*
* Should be called under rcu read lock critical section.
*
head = rcu_dereference(q->head);
next = rcu_dereference(head->next);
- if (is_dummy(q, head) && next == NULL)
+ if (head->dummy && next == NULL)
return NULL; /* empty */
/*
* We never, ever allow dequeue to get to a state where
* the queue is empty (we need at least one node in the
* queue). This is ensured by checking if the head next
- * is NULL and retry in that case (this means a
- * concurrent dummy node re-enqueue is in progress).
+		 * is NULL, which means we need to enqueue a dummy node
+		 * before we can hope to dequeue anything.
*/
- if (next) {
- if (uatomic_cmpxchg(&q->head, head, next) == head) {
- if (is_dummy(q, head)) {
- struct cds_lfq_node_rcu *node;
- /*
- * Requeue dummy. We need to
- * reallocate to protect from
- * ABA.
- */
- rcu_free_dummy(head);
- node = make_dummy(q, NULL);
- /*
- * We are the only thread
- * allowed to update dummy (we
- * own the old dummy).
- */
- q->dummy = node;
- _cds_lfq_enqueue_rcu(q, node);
- continue; /* try again */
- }
- return head;
- } else {
- /* Concurrently pushed, retry */
- continue;
- }
- } else {
- /* Dummy node re-enqueue is in progress, retry. */
- continue;
+ if (!next) {
+ enqueue_dummy(q);
+ next = rcu_dereference(head->next);
+ }
+ if (uatomic_cmpxchg(&q->head, head, next) != head)
+ continue; /* Concurrently pushed. */
+ if (head->dummy) {
+			/* Defer freeing the dummy node until after a grace period. */
+ rcu_free_dummy(head);
+ continue; /* try again */
}
+ return head;
}
}