#define _URCU_WFCQUEUE_STATIC_H
/*
- * wfcqueue-static.h
+ * urcu/static/wfcqueue.h
*
* Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue
*
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfcqueue.h for linking
- * dynamically with the userspace rcu library.
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for
+ * linking dynamically with the userspace rcu library.
*
* Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright 2011-2012 - Lai Jiangshan <laijs@cn.fujitsu.com>
/*
* Concurrent queue with wait-free enqueue/blocking dequeue.
*
- * Inspired from half-wait-free/half-blocking queue implementation done by
- * Paul E. McKenney.
+ * This queue has been designed and implemented collaboratively by
+ * Mathieu Desnoyers and Lai Jiangshan. Inspired from
+ * half-wait-free/half-blocking queue implementation done by Paul E.
+ * McKenney.
*
- * Mutual exclusion of __cds_wfcq_* API
+ * Mutual exclusion of cds_wfcq_* / __cds_wfcq_* API
+ *
+ * Synchronization table:
+ *
+ * External synchronization techniques described in the API below are
+ * required between pairs marked with "X". No external synchronization
+ * is required between pairs marked with "-".
+ *
+ * Legend:
+ * [1] cds_wfcq_enqueue
+ * [2] __cds_wfcq_splice (destination queue)
+ * [3] __cds_wfcq_dequeue
+ * [4] __cds_wfcq_splice (source queue)
+ * [5] __cds_wfcq_first
+ * [6] __cds_wfcq_next
+ *
+ * [1] [2] [3] [4] [5] [6]
+ * [1] - - - - - -
+ * [2] - - - - - -
+ * [3] - - X X X X
+ * [4] - - X - X X
+ * [5] - - X X - -
+ * [6] - - X X - -
*
- * Unless otherwise stated, the caller must ensure mutual exclusion of
- * queue update operations "dequeue" and "splice" (for source queue).
- * Queue read operations "first" and "next" need to be protected against
- * concurrent "dequeue" and "splice" (for source queue) by the caller.
- * "enqueue", "splice" (for destination queue), and "empty" are the only
- * operations that can be used without any mutual exclusion.
* Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock().
*
* For convenience, cds_wfcq_dequeue_blocking() and
* cds_wfcq_splice_blocking() hold the dequeue lock.
+ *
+ * Besides locking, mutual exclusion of dequeue, splice and iteration
+ * can be ensured by performing all of those operations from a single
+ * thread, without requiring any lock.
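+ *
+ * As an illustration of the table above (a sketch only; struct my_item,
+ * producer() and consumer() are hypothetical, and the public wrappers
+ * from urcu/wfcqueue.h are used), enqueue needs no external
+ * synchronization, while dequeue either relies on the dequeue lock
+ * (taken internally by cds_wfcq_dequeue_blocking()) or on a single
+ * consumer thread:
+ *
+ *	#include <urcu/wfcqueue.h>
+ *
+ *	struct my_item {
+ *		int value;
+ *		struct cds_wfcq_node node;
+ *	};
+ *
+ *	static struct cds_wfcq_head my_head;
+ *	static struct cds_wfcq_tail my_tail;
+ *
+ *	static void my_init(void)
+ *	{
+ *		cds_wfcq_init(&my_head, &my_tail);
+ *	}
+ *
+ *	[any number of producer threads, no external synchronization]
+ *	static void producer(struct my_item *item)
+ *	{
+ *		cds_wfcq_node_init(&item->node);
+ *		(void) cds_wfcq_enqueue(&my_head, &my_tail, &item->node);
+ *	}
+ *
+ *	[single consumer, or internally serialized by the dequeue lock]
+ *	static struct my_item *consumer(void)
+ *	{
+ *		struct cds_wfcq_node *qnode;
+ *
+ *		qnode = cds_wfcq_dequeue_blocking(&my_head, &my_tail);
+ *		if (!qnode)
+ *			return NULL;
+ *		return caa_container_of(qnode, struct my_item, node);
+ *	}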
*/
#define WFCQ_ADAPT_ATTEMPTS 10 /* Retry if being set */
* cds_wfcq_empty: return whether wait-free queue is empty.
*
* No memory barrier is issued. No mutual exclusion is required.
+ *
+ * We perform the test on head->node.next to check if the queue is
+ * possibly empty, but we confirm this by checking if the tail pointer
+ * points to the head node because the tail pointer is the linearisation
+ * point of the enqueuers. Just checking the head next pointer could
+ * make a queue appear empty if an enqueuer is preempted for a long time
+ * between xchg() and setting the previous node's next pointer.
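+ *
+ * A minimal sketch of that two-step test (assuming the function body,
+ * elided from this hunk, is equivalent):
+ *
+ *	if (CMM_LOAD_SHARED(head->node.next) != NULL)
+ *		return false;	[possibly non-empty]
+ *	return CMM_LOAD_SHARED(tail->p) == &head->node;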
*/
static inline bool _cds_wfcq_empty(struct cds_wfcq_head *head,
struct cds_wfcq_tail *tail)
assert(!ret);
}
-static inline void ___cds_wfcq_append(struct cds_wfcq_head *head,
+static inline bool ___cds_wfcq_append(struct cds_wfcq_head *head,
struct cds_wfcq_tail *tail,
struct cds_wfcq_node *new_head,
struct cds_wfcq_node *new_tail)
* perspective.
*/
CMM_STORE_SHARED(old_tail->next, new_head);
+ /*
+ * Return false if queue was empty prior to adding the node,
+ * else return true.
+ */
+ return old_tail != &head->node;
}
/*
*
* Issues a full memory barrier before enqueue. No mutual exclusion is
* required.
+ *
+ * Returns false if the queue was empty prior to adding the node.
+ * Returns true otherwise.
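+ *
+ * The return value can be used, for instance, to act only on the
+ * empty -> non-empty transition (sketch; wake_consumer() is
+ * hypothetical):
+ *
+ *	if (!cds_wfcq_enqueue(&my_head, &my_tail, &item->node))
+ *		wake_consumer();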
*/
-static inline void _cds_wfcq_enqueue(struct cds_wfcq_head *head,
+static inline bool _cds_wfcq_enqueue(struct cds_wfcq_head *head,
struct cds_wfcq_tail *tail,
struct cds_wfcq_node *new_tail)
{
- ___cds_wfcq_append(head, tail, new_tail, new_tail);
+ return ___cds_wfcq_append(head, tail, new_tail, new_tail);
+}
+
+/*
+ * ___cds_wfcq_busy_wait: adaptive busy-wait.
+ *
+ * Returns 1 if nonblocking and needs to block, 0 otherwise.
+ */
+static inline bool
+___cds_wfcq_busy_wait(int *attempt, int blocking)
+{
+ if (!blocking)
+ return 1;
+ if (++(*attempt) >= WFCQ_ADAPT_ATTEMPTS) {
+ poll(NULL, 0, WFCQ_WAIT); /* Wait for 10ms */
+ *attempt = 0;
+ } else {
+ caa_cpu_relax();
+ }
+ return 0;
}
/*
* Waiting for enqueuer to complete enqueue and return the next node.
*/
static inline struct cds_wfcq_node *
-___cds_wfcq_node_sync_next(struct cds_wfcq_node *node)
+___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
{
struct cds_wfcq_node *next;
int attempt = 0;
* Adaptative busy-looping waiting for enqueuer to complete enqueue.
*/
while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
- if (++attempt >= WFCQ_ADAPT_ATTEMPTS) {
- poll(NULL, 0, WFCQ_WAIT); /* Wait for 10ms */
- attempt = 0;
- } else {
- caa_cpu_relax();
- }
+ if (___cds_wfcq_busy_wait(&attempt, blocking))
+ return CDS_WFCQ_WOULDBLOCK;
}
return next;
}
-/*
- * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing.
- *
- * Content written into the node before enqueue is guaranteed to be
- * consistent, but no other memory ordering is ensured.
- * Should be called with cds_wfcq_dequeue_lock() held.
- */
static inline struct cds_wfcq_node *
-___cds_wfcq_first_blocking(struct cds_wfcq_head *head,
- struct cds_wfcq_tail *tail)
+___cds_wfcq_first(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail,
+ int blocking)
{
struct cds_wfcq_node *node;
if (_cds_wfcq_empty(head, tail))
return NULL;
- node = ___cds_wfcq_node_sync_next(&head->node);
+ node = ___cds_wfcq_node_sync_next(&head->node, blocking);
/* Load head->node.next before loading node's content */
cmm_smp_read_barrier_depends();
return node;
}
/*
- * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing.
+ * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing.
*
* Content written into the node before enqueue is guaranteed to be
* consistent, but no other memory ordering is ensured.
- * Should be called with cds_wfcq_dequeue_lock() held.
+ * Dequeue/splice/iteration mutual exclusion should be ensured by the
+ * caller.
+ *
+ * Used by for-like iteration macros in urcu/wfcqueue.h:
+ * __cds_wfcq_for_each_blocking()
+ * __cds_wfcq_for_each_blocking_safe()
+ *
+ * Returns NULL if queue is empty, first node otherwise.
*/
static inline struct cds_wfcq_node *
-___cds_wfcq_next_blocking(struct cds_wfcq_head *head,
+___cds_wfcq_first_blocking(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail)
+{
+ return ___cds_wfcq_first(head, tail, 1);
+}
+
+/*
+ * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing.
+ *
+ * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if
+ * it needs to block.
+ */
+static inline struct cds_wfcq_node *
+___cds_wfcq_first_nonblocking(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail)
+{
+ return ___cds_wfcq_first(head, tail, 0);
+}
+
+static inline struct cds_wfcq_node *
+___cds_wfcq_next(struct cds_wfcq_head *head,
struct cds_wfcq_tail *tail,
- struct cds_wfcq_node *node)
+ struct cds_wfcq_node *node,
+ int blocking)
{
struct cds_wfcq_node *next;
cmm_smp_rmb();
if (CMM_LOAD_SHARED(tail->p) == node)
return NULL;
- next = ___cds_wfcq_node_sync_next(node);
+ next = ___cds_wfcq_node_sync_next(node, blocking);
}
/* Load node->next before loading next's content */
cmm_smp_read_barrier_depends();
}
/*
- * __cds_wfcq_dequeue_blocking: dequeue a node from the queue.
- *
- * No need to go on a waitqueue here, as there is no possible state in which the
- * list could cause dequeue to busy-loop needlessly while waiting for another
- * thread to be scheduled. The queue appears empty until tail->next is set by
- * enqueue.
+ * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing.
*
* Content written into the node before enqueue is guaranteed to be
* consistent, but no other memory ordering is ensured.
- * It is valid to reuse and free a dequeued node immediately.
- * Should be called with cds_wfcq_dequeue_lock() held.
+ * Dequeue/splice/iteration mutual exclusion should be ensured by the
+ * caller.
+ *
+ * Used by for-like iteration macros in urcu/wfcqueue.h:
+ * __cds_wfcq_for_each_blocking()
+ * __cds_wfcq_for_each_blocking_safe()
+ *
+ * Returns NULL if reached end of queue, non-NULL next queue node
+ * otherwise.
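+ *
+ * Typical use is through the iteration macros referenced above
+ * (sketch; struct my_item and process() are hypothetical; dequeue/splice
+ * mutual exclusion is assumed to be ensured by the caller):
+ *
+ *	struct cds_wfcq_node *qnode;
+ *
+ *	__cds_wfcq_for_each_blocking(&my_head, &my_tail, qnode)
+ *		process(caa_container_of(qnode, struct my_item, node));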
*/
static inline struct cds_wfcq_node *
-___cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
- struct cds_wfcq_tail *tail)
+___cds_wfcq_next_blocking(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail,
+ struct cds_wfcq_node *node)
+{
+ return ___cds_wfcq_next(head, tail, node, 1);
+}
+
+/*
+ * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
+ *
+ * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
+ * it needs to block.
+ */
+static inline struct cds_wfcq_node *
+___cds_wfcq_next_nonblocking(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail,
+ struct cds_wfcq_node *node)
+{
+ return ___cds_wfcq_next(head, tail, node, 0);
+}
+
+static inline struct cds_wfcq_node *
+___cds_wfcq_dequeue_with_state(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail,
+ int *state,
+ int blocking)
{
struct cds_wfcq_node *node, *next;
- if (_cds_wfcq_empty(head, tail))
+ if (state)
+ *state = 0;
+
+ if (_cds_wfcq_empty(head, tail)) {
return NULL;
+ }
- node = ___cds_wfcq_node_sync_next(&head->node);
+ node = ___cds_wfcq_node_sync_next(&head->node, blocking);
+ if (!blocking && node == CDS_WFCQ_WOULDBLOCK) {
+ return CDS_WFCQ_WOULDBLOCK;
+ }
if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
/*
* content.
*/
_cds_wfcq_node_init(&head->node);
- if (uatomic_cmpxchg(&tail->p, node, &head->node) == node)
+ if (uatomic_cmpxchg(&tail->p, node, &head->node) == node) {
+ if (state)
+ *state |= CDS_WFCQ_STATE_LAST;
return node;
- next = ___cds_wfcq_node_sync_next(node);
+ }
+ next = ___cds_wfcq_node_sync_next(node, blocking);
+ /*
+ * In nonblocking mode, if we would need to block to
+ * get node's next, set the head next node pointer
+ * (currently NULL) back to its original value.
+ */
+ if (!blocking && next == CDS_WFCQ_WOULDBLOCK) {
+ head->node.next = node;
+ return CDS_WFCQ_WOULDBLOCK;
+ }
}
/*
}
/*
- * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
+ * __cds_wfcq_dequeue_with_state_blocking: dequeue node from queue, with state.
+ *
+ * Content written into the node before enqueue is guaranteed to be
+ * consistent, but no other memory ordering is ensured.
+ * It is valid to reuse and free a dequeued node immediately.
+ * Dequeue/splice/iteration mutual exclusion should be ensured by the
+ * caller.
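+ *
+ * The state output can be tested for CDS_WFCQ_STATE_LAST to detect
+ * that the dequeued node was the last one in the queue (sketch):
+ *
+ *	int state;
+ *	struct cds_wfcq_node *qnode;
+ *
+ *	qnode = __cds_wfcq_dequeue_with_state_blocking(&my_head, &my_tail,
+ *			&state);
+ *	if (qnode && (state & CDS_WFCQ_STATE_LAST))
+ *		...	[queue observed to become empty by this dequeue]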
+ */
+static inline struct cds_wfcq_node *
+___cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail, int *state)
+{
+ return ___cds_wfcq_dequeue_with_state(head, tail, state, 1);
+}
+
+/*
+ * ___cds_wfcq_dequeue_blocking: dequeue node from queue.
+ *
+ * Same as __cds_wfcq_dequeue_with_state_blocking, but without saving
+ * state.
+ */
+static inline struct cds_wfcq_node *
+___cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail)
+{
+ return ___cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
+}
+
+/*
+ * __cds_wfcq_dequeue_with_state_nonblocking: dequeue node, with state.
+ *
+ * Same as __cds_wfcq_dequeue_with_state_blocking, but returns
+ * CDS_WFCQ_WOULDBLOCK
+ * if it needs to block.
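+ *
+ * Callers should be prepared to handle the CDS_WFCQ_WOULDBLOCK return
+ * value (sketch; do_other_work() is hypothetical):
+ *
+ *	qnode = __cds_wfcq_dequeue_with_state_nonblocking(&my_head,
+ *			&my_tail, &state);
+ *	if (qnode == CDS_WFCQ_WOULDBLOCK)
+ *		do_other_work();	[retry later instead of blocking]
+ *	else if (qnode)
+ *		...	[got a node]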
+ */
+static inline struct cds_wfcq_node *
+___cds_wfcq_dequeue_with_state_nonblocking(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail, int *state)
+{
+ return ___cds_wfcq_dequeue_with_state(head, tail, state, 0);
+}
+
+/*
+ * ___cds_wfcq_dequeue_nonblocking: dequeue node from queue.
+ *
+ * Same as __cds_wfcq_dequeue_with_state_nonblocking, but without saving
+ * state.
+ */
+static inline struct cds_wfcq_node *
+___cds_wfcq_dequeue_nonblocking(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail)
+{
+ return ___cds_wfcq_dequeue_with_state_nonblocking(head, tail, NULL);
+}
+
+/*
+ * __cds_wfcq_splice: enqueue all src_q nodes at the end of dest_q.
*
* Dequeue all nodes from src_q.
* dest_q must be already initialized.
- * Should be called with cds_wfcq_dequeue_lock() held on src_q.
+ * Mutual exclusion for src_q should be ensured by the caller as
+ * specified in the "Synchronization table".
+ * Returns enum cds_wfcq_ret which indicates the state of the src or
+ * dest queue.
*/
-static inline void
-___cds_wfcq_splice_blocking(
+static inline enum cds_wfcq_ret
+___cds_wfcq_splice(
struct cds_wfcq_head *dest_q_head,
struct cds_wfcq_tail *dest_q_tail,
struct cds_wfcq_head *src_q_head,
- struct cds_wfcq_tail *src_q_tail)
+ struct cds_wfcq_tail *src_q_tail,
+ int blocking)
{
struct cds_wfcq_node *head, *tail;
+ int attempt = 0;
+ /*
+ * Initial emptiness check to speed up cases where queue is
+ * empty: only require loads to check if queue is empty.
+ */
if (_cds_wfcq_empty(src_q_head, src_q_tail))
- return;
+ return CDS_WFCQ_RET_SRC_EMPTY;
- head = ___cds_wfcq_node_sync_next(&src_q_head->node);
- _cds_wfcq_node_init(&src_q_head->node);
+ for (;;) {
+ /*
+ * Open-coded _cds_wfcq_empty() by testing result of
+ * uatomic_xchg, as well as tail pointer vs head node
+ * address.
+ */
+ head = uatomic_xchg(&src_q_head->node.next, NULL);
+ if (head)
+ break; /* non-empty */
+ if (CMM_LOAD_SHARED(src_q_tail->p) == &src_q_head->node)
+ return CDS_WFCQ_RET_SRC_EMPTY;
+ if (___cds_wfcq_busy_wait(&attempt, blocking))
+ return CDS_WFCQ_RET_WOULDBLOCK;
+ }
/*
* Memory barrier implied before uatomic_xchg() orders store to
* Append the spliced content of src_q into dest_q. Does not
* require mutual exclusion on dest_q (wait-free).
*/
- ___cds_wfcq_append(dest_q_head, dest_q_tail, head, tail);
+ if (___cds_wfcq_append(dest_q_head, dest_q_tail, head, tail))
+ return CDS_WFCQ_RET_DEST_NON_EMPTY;
+ else
+ return CDS_WFCQ_RET_DEST_EMPTY;
+}
+
+/*
+ * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
+ *
+ * Dequeue all nodes from src_q.
+ * dest_q must be already initialized.
+ * Mutual exclusion for src_q should be ensured by the caller as
+ * specified in the "Synchronization table".
+ * Returns enum cds_wfcq_ret which indicates the state of the src or
+ * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
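+ *
+ * One possible pattern (sketch; mutual exclusion on the src queue is
+ * assumed to be ensured by the caller, e.g. by the dequeue lock or a
+ * single splicing thread) is to drain a shared queue into a local one
+ * before iterating it:
+ *
+ *	enum cds_wfcq_ret ret;
+ *
+ *	ret = __cds_wfcq_splice_blocking(&local_head, &local_tail,
+ *			&shared_head, &shared_tail);
+ *	if (ret != CDS_WFCQ_RET_SRC_EMPTY)
+ *		...	[local queue now holds the spliced nodes]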
+ */
+static inline enum cds_wfcq_ret
+___cds_wfcq_splice_blocking(
+ struct cds_wfcq_head *dest_q_head,
+ struct cds_wfcq_tail *dest_q_tail,
+ struct cds_wfcq_head *src_q_head,
+ struct cds_wfcq_tail *src_q_tail)
+{
+ return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
+ src_q_head, src_q_tail, 1);
}
/*
- * cds_wfcq_dequeue_blocking: dequeue a node from a wait-free queue.
+ * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q.
+ *
+ * Same as __cds_wfcq_splice_blocking, but returns
+ * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
+ */
+static inline enum cds_wfcq_ret
+___cds_wfcq_splice_nonblocking(
+ struct cds_wfcq_head *dest_q_head,
+ struct cds_wfcq_tail *dest_q_tail,
+ struct cds_wfcq_head *src_q_head,
+ struct cds_wfcq_tail *src_q_tail)
+{
+ return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
+ src_q_head, src_q_tail, 0);
+}
+
+/*
+ * cds_wfcq_dequeue_with_state_blocking: dequeue a node from a wait-free queue.
*
* Content written into the node before enqueue is guaranteed to be
* consistent, but no other memory ordering is ensured.
- * Mutual exlusion with (and only with) cds_wfcq_splice_blocking is
+ * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is
* ensured.
* It is valid to reuse and free a dequeued node immediately.
*/
static inline struct cds_wfcq_node *
-_cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
- struct cds_wfcq_tail *tail)
+_cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail, int *state)
{
struct cds_wfcq_node *retval;
_cds_wfcq_dequeue_lock(head, tail);
- retval = ___cds_wfcq_dequeue_blocking(head, tail);
+ retval = ___cds_wfcq_dequeue_with_state_blocking(head, tail, state);
_cds_wfcq_dequeue_unlock(head, tail);
return retval;
}
+/*
+ * cds_wfcq_dequeue_blocking: dequeue node from queue.
+ *
+ * Same as cds_wfcq_dequeue_with_state_blocking, but without saving state.
+ */
+static inline struct cds_wfcq_node *
+_cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
+ struct cds_wfcq_tail *tail)
+{
+ return _cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
+}
+
/*
* cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
*
* dest_q must be already initialized.
* Content written into the node before enqueue is guaranteed to be
* consistent, but no other memory ordering is ensured.
- * Mutual exlusion with (and only with) cds_wfcq_dequeue_blocking is
+ * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is
* ensured.
+ * Returns enum cds_wfcq_ret which indicates the state of the src or
+ * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
*/
-static inline void
+static inline enum cds_wfcq_ret
_cds_wfcq_splice_blocking(
struct cds_wfcq_head *dest_q_head,
struct cds_wfcq_tail *dest_q_tail,
struct cds_wfcq_head *src_q_head,
struct cds_wfcq_tail *src_q_tail)
{
+ enum cds_wfcq_ret ret;
+
_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
- ___cds_wfcq_splice_blocking(dest_q_head, dest_q_tail,
+ ret = ___cds_wfcq_splice_blocking(dest_q_head, dest_q_tail,
src_q_head, src_q_tail);
_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
+ return ret;
}
#ifdef __cplusplus