X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Fwfcqueue.h;h=af7a8c6e7d4e3b1a7876347eb5cfb52db8e88046;hp=a284b580af7d8777087ba17e6094c40e03abecc9;hb=ffa11a1830c532f3b052146eb9f0dd450cb2a0f2;hpb=dfb65fd3cc75f660f35feedd2e14b000dbe39a4b

diff --git a/urcu/static/wfcqueue.h b/urcu/static/wfcqueue.h
index a284b58..af7a8c6 100644
--- a/urcu/static/wfcqueue.h
+++ b/urcu/static/wfcqueue.h
@@ -46,15 +46,30 @@ extern "C" {
  * half-wait-free/half-blocking queue implementation done by Paul E.
  * McKenney.
  *
- * Mutual exclusion of __cds_wfcq_* API
- *
- * Unless otherwise stated, the caller must ensure mutual exclusion of
- * queue update operations "dequeue" and "splice" (for source queue).
- * Queue read operations "first" and "next", which are used by
- * "for_each" iterations, need to be protected against concurrent
- * "dequeue" and "splice" (for source queue) by the caller.
- * "enqueue", "splice" (for destination queue), and "empty" are the only
- * operations that can be used without any mutual exclusion.
+ * Mutual exclusion of cds_wfcq_* / __cds_wfcq_* API
+ *
+ * Synchronization table:
+ *
+ * External synchronization techniques described in the API below are
+ * required between pairs marked with "X". No external synchronization
+ * is required between pairs marked with "-".
+ *
+ * Legend:
+ * [1] cds_wfcq_enqueue
+ * [2] __cds_wfcq_splice (destination queue)
+ * [3] __cds_wfcq_dequeue
+ * [4] __cds_wfcq_splice (source queue)
+ * [5] __cds_wfcq_first
+ * [6] __cds_wfcq_next
+ *
+ *          [1]  [2]  [3]  [4]  [5]  [6]
+ *    [1]    -    -    -    -    -    -
+ *    [2]    -    -    -    -    -    -
+ *    [3]    -    -    X    X    X    X
+ *    [4]    -    -    X    -    X    X
+ *    [5]    -    -    X    X    -    -
+ *    [6]    -    -    X    X    -    -
+ *
  * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock().
  *
  * For convenience, cds_wfcq_dequeue_blocking() and
@@ -95,6 +110,13 @@ static inline void _cds_wfcq_init(struct cds_wfcq_head *head,
  * cds_wfcq_empty: return whether wait-free queue is empty.
  *
  * No memory barrier is issued. No mutual exclusion is required.
+ *
+ * We perform the test on head->node.next to check if the queue is
+ * possibly empty, but we confirm this by checking if the tail pointer
+ * points to the head node because the tail pointer is the linearisation
+ * point of the enqueuers. Just checking the head next pointer could
+ * make a queue appear empty if an enqueuer is preempted for a long time
+ * between xchg() and setting the previous node's next pointer.
  */
 static inline bool _cds_wfcq_empty(struct cds_wfcq_head *head,
 		struct cds_wfcq_tail *tail)
@@ -128,7 +150,7 @@ static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head,
 	assert(!ret);
 }
 
-static inline void ___cds_wfcq_append(struct cds_wfcq_head *head,
+static inline bool ___cds_wfcq_append(struct cds_wfcq_head *head,
 		struct cds_wfcq_tail *tail,
 		struct cds_wfcq_node *new_head,
 		struct cds_wfcq_node *new_tail)
@@ -152,6 +174,11 @@ static inline void ___cds_wfcq_append(struct cds_wfcq_head *head,
 	 * perspective.
 	 */
 	CMM_STORE_SHARED(old_tail->next, new_head);
+	/*
+	 * Return false if queue was empty prior to adding the node,
+	 * else return true.
+	 */
+	return old_tail != &head->node;
 }
 
 /*
@@ -159,12 +186,34 @@ static inline void ___cds_wfcq_append(struct cds_wfcq_head *head,
  *
  * Issues a full memory barrier before enqueue. No mutual exclusion is
  * required.
+ *
+ * Returns false if the queue was empty prior to adding the node.
+ * Returns true otherwise.
  */
-static inline void _cds_wfcq_enqueue(struct cds_wfcq_head *head,
+static inline bool _cds_wfcq_enqueue(struct cds_wfcq_head *head,
 		struct cds_wfcq_tail *tail,
 		struct cds_wfcq_node *new_tail)
 {
-	___cds_wfcq_append(head, tail, new_tail, new_tail);
+	return ___cds_wfcq_append(head, tail, new_tail, new_tail);
+}
+
+/*
+ * ___cds_wfcq_busy_wait: adaptative busy-wait.
+ *
+ * Returns 1 if nonblocking and needs to block, 0 otherwise.
+ */
+static inline bool
+___cds_wfcq_busy_wait(int *attempt, int blocking)
+{
+	if (!blocking)
+		return 1;
+	if (++(*attempt) >= WFCQ_ADAPT_ATTEMPTS) {
+		poll(NULL, 0, WFCQ_WAIT);	/* Wait for 10ms */
+		*attempt = 0;
+	} else {
+		caa_cpu_relax();
+	}
+	return 0;
 }
 
 /*
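The boolean returned by the enqueue path reports whether the queue was empty just before the new node was appended, which lets a producer detect the empty-to-non-empty transition without any extra synchronization. A minimal caller-side sketch (assuming the public cds_wfcq_enqueue wrapper mirrors this bool return; struct job, submit_job() and wake_consumer() are hypothetical):

#include <urcu/wfcqueue.h>

struct job {
	struct cds_wfcq_node node;	/* queue linkage */
	int payload;
};

/* Hypothetical wake-up hook (e.g. pthread_cond_signal() or a futex wake). */
extern void wake_consumer(void);

static void submit_job(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, struct job *job)
{
	cds_wfcq_node_init(&job->node);
	/*
	 * false means the queue was empty before this enqueue, so the
	 * consumer may be idle; that is the only case needing a wake-up.
	 */
	if (!cds_wfcq_enqueue(head, tail, &job->node))
		wake_consumer();
}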
@@ -180,14 +229,8 @@ ___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
 	 * Adaptative busy-looping waiting for enqueuer to complete enqueue.
 	 */
 	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
-		if (!blocking)
+		if (___cds_wfcq_busy_wait(&attempt, blocking))
 			return CDS_WFCQ_WOULDBLOCK;
-		if (++attempt >= WFCQ_ADAPT_ATTEMPTS) {
-			poll(NULL, 0, WFCQ_WAIT);	/* Wait for 10ms */
-			attempt = 0;
-		} else {
-			caa_cpu_relax();
-		}
 	}
 
 	return next;
@@ -219,6 +262,8 @@ ___cds_wfcq_first(struct cds_wfcq_head *head,
  * Used by for-like iteration macros in urcu/wfcqueue.h:
  * __cds_wfcq_for_each_blocking()
  * __cds_wfcq_for_each_blocking_safe()
+ *
+ * Returns NULL if queue is empty, first node otherwise.
  */
 static inline struct cds_wfcq_node *
 ___cds_wfcq_first_blocking(struct cds_wfcq_head *head,
@@ -278,6 +323,9 @@ ___cds_wfcq_next(struct cds_wfcq_head *head,
  * Used by for-like iteration macros in urcu/wfcqueue.h:
  * __cds_wfcq_for_each_blocking()
  * __cds_wfcq_for_each_blocking_safe()
+ *
+ * Returns NULL if reached end of queue, non-NULL next queue node
+ * otherwise.
  */
 static inline struct cds_wfcq_node *
 ___cds_wfcq_next_blocking(struct cds_wfcq_head *head,
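Per the synchronization table, "first" [5] and "next" [6] must be protected against concurrent dequeue and splice (source queue); holding the dequeue lock is the simplest way to provide that. A short sketch of a read-only walk over the queue under that lock, reusing the illustrative struct job from the enqueue sketch above:

#include <stdio.h>
#include <urcu/wfcqueue.h>
#include <urcu/compiler.h>	/* caa_container_of() */

struct job {
	struct cds_wfcq_node node;
	int payload;
};

static void print_jobs(struct cds_wfcq_head *head, struct cds_wfcq_tail *tail)
{
	struct cds_wfcq_node *node;

	/* Excludes concurrent dequeue and splice-from-source, per the table. */
	cds_wfcq_dequeue_lock(head, tail);
	__cds_wfcq_for_each_blocking(head, tail, node)
		printf("job %d\n",
			caa_container_of(node, struct job, node)->payload);
	cds_wfcq_dequeue_unlock(head, tail);
}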
@@ -384,7 +432,17 @@ ___cds_wfcq_dequeue_nonblocking(struct cds_wfcq_head *head,
 	return ___cds_wfcq_dequeue(head, tail, 0);
 }
 
-static inline int
+/*
+ * __cds_wfcq_splice: enqueue all src_q nodes at the end of dest_q.
+ *
+ * Dequeue all nodes from src_q.
+ * dest_q must be already initialized.
+ * Mutual exclusion for src_q should be ensured by the caller as
+ * specified in the "Synchronization table".
+ * Returns enum cds_wfcq_ret which indicates the state of the src or
+ * dest queue.
+ */
+static inline enum cds_wfcq_ret
 ___cds_wfcq_splice(
 		struct cds_wfcq_head *dest_q_head,
 		struct cds_wfcq_tail *dest_q_tail,
@@ -393,14 +451,29 @@ ___cds_wfcq_splice(
 		int blocking)
 {
 	struct cds_wfcq_node *head, *tail;
+	int attempt = 0;
 
+	/*
+	 * Initial emptiness check to speed up cases where queue is
+	 * empty: only require loads to check if queue is empty.
+	 */
 	if (_cds_wfcq_empty(src_q_head, src_q_tail))
-		return 0;
+		return CDS_WFCQ_RET_SRC_EMPTY;
 
-	head = ___cds_wfcq_node_sync_next(&src_q_head->node, blocking);
-	if (head == CDS_WFCQ_WOULDBLOCK)
-		return -1;
-	_cds_wfcq_node_init(&src_q_head->node);
+	for (;;) {
+		/*
+		 * Open-coded _cds_wfcq_empty() by testing result of
+		 * uatomic_xchg, as well as tail pointer vs head node
+		 * address.
+		 */
+		head = uatomic_xchg(&src_q_head->node.next, NULL);
+		if (head)
+			break;	/* non-empty */
+		if (CMM_LOAD_SHARED(src_q_tail->p) == &src_q_head->node)
+			return CDS_WFCQ_RET_SRC_EMPTY;
+		if (___cds_wfcq_busy_wait(&attempt, blocking))
+			return CDS_WFCQ_RET_WOULDBLOCK;
+	}
 
 	/*
 	 * Memory barrier implied before uatomic_xchg() orders store to
@@ -414,37 +487,40 @@ ___cds_wfcq_splice(
 	 * Append the spliced content of src_q into dest_q. Does not
 	 * require mutual exclusion on dest_q (wait-free).
 	 */
-	___cds_wfcq_append(dest_q_head, dest_q_tail, head, tail);
-	return 0;
+	if (___cds_wfcq_append(dest_q_head, dest_q_tail, head, tail))
+		return CDS_WFCQ_RET_DEST_NON_EMPTY;
+	else
+		return CDS_WFCQ_RET_DEST_EMPTY;
 }
 
-
 /*
  * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
  *
  * Dequeue all nodes from src_q.
  * dest_q must be already initialized.
- * Dequeue/splice/iteration mutual exclusion for src_q should be ensured
- * by the caller.
+ * Mutual exclusion for src_q should be ensured by the caller as
+ * specified in the "Synchronization table".
+ * Returns enum cds_wfcq_ret which indicates the state of the src or
+ * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
  */
-static inline void
+static inline enum cds_wfcq_ret
 ___cds_wfcq_splice_blocking(
 		struct cds_wfcq_head *dest_q_head,
 		struct cds_wfcq_tail *dest_q_tail,
 		struct cds_wfcq_head *src_q_head,
 		struct cds_wfcq_tail *src_q_tail)
 {
-	(void) ___cds_wfcq_splice(dest_q_head, dest_q_tail,
+	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
 			src_q_head, src_q_tail, 1);
 }
 
 /*
  * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q.
  *
- * Same as __cds_wfcq_splice_blocking, but returns nonzero if it needs to
- * block.
+ * Same as __cds_wfcq_splice_blocking, but returns
+ * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
  */
-static inline int
+static inline enum cds_wfcq_ret
 ___cds_wfcq_splice_nonblocking(
 		struct cds_wfcq_head *dest_q_head,
 		struct cds_wfcq_tail *dest_q_tail,
@@ -460,7 +536,7 @@ ___cds_wfcq_splice_nonblocking(
  *
  * Content written into the node before enqueue is guaranteed to be
  * consistent, but no other memory ordering is ensured.
- * Mutual exlusion with cds_wfcq_splice_blocking and dequeue lock is
+ * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is
  * ensured.
  * It is valid to reuse and free a dequeued node immediately.
  */
@@ -483,20 +559,25 @@ _cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
  * dest_q must be already initialized.
  * Content written into the node before enqueue is guaranteed to be
  * consistent, but no other memory ordering is ensured.
- * Mutual exlusion with cds_wfcq_dequeue_blocking and dequeue lock is
+ * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is
  * ensured.
+ * Returns enum cds_wfcq_ret which indicates the state of the src or
+ * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
  */
-static inline void
+static inline enum cds_wfcq_ret
 _cds_wfcq_splice_blocking(
 		struct cds_wfcq_head *dest_q_head,
 		struct cds_wfcq_tail *dest_q_tail,
 		struct cds_wfcq_head *src_q_head,
 		struct cds_wfcq_tail *src_q_tail)
 {
+	enum cds_wfcq_ret ret;
+
 	_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
-	___cds_wfcq_splice_blocking(dest_q_head, dest_q_tail,
+	ret = ___cds_wfcq_splice_blocking(dest_q_head, dest_q_tail,
 			src_q_head, src_q_tail);
 	_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
+	return ret;
 }
 
 #ifdef __cplusplus
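Taken together, the new return values let a consumer drain a shared queue wholesale and learn from the enum whether anything was actually moved. A sketch of such a consumer, under the same assumptions as the earlier examples (illustrative struct job and process_job() handler, public wrappers mirroring the enum cds_wfcq_ret return introduced here):

#include <urcu/wfcqueue.h>
#include <urcu/compiler.h>

struct job {
	struct cds_wfcq_node node;
	int payload;
};

extern void process_job(struct job *job);	/* hypothetical per-job handler */

static void drain_jobs(struct cds_wfcq_head *src_head,
		struct cds_wfcq_tail *src_tail)
{
	struct cds_wfcq_head local_head;
	struct cds_wfcq_tail local_tail;
	struct cds_wfcq_node *node, *next;

	cds_wfcq_init(&local_head, &local_tail);

	/*
	 * cds_wfcq_splice_blocking() takes the source queue's dequeue lock
	 * internally and moves every node over in one operation; the return
	 * value says whether the source had anything to give.
	 */
	if (cds_wfcq_splice_blocking(&local_head, &local_tail,
			src_head, src_tail) == CDS_WFCQ_RET_SRC_EMPTY)
		return;

	/*
	 * local_* is private to this thread, so the "safe" iteration may
	 * reuse or free nodes as it goes without further locking.
	 */
	__cds_wfcq_for_each_blocking_safe(&local_head, &local_tail, node, next)
		process_job(caa_container_of(node, struct job, node));
}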