X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Fwfcqueue.h;h=1200227f4d3f6ade1f118b47ec77d592b3a508b5;hp=944ee88c74d855f78035b6f826c8b73eb6de79b6;hb=23773356a9fd12bf12627df437d0c7bd20e8ef01;hpb=f94061a3df4c9eab9ac869a19e4228de54771fcb diff --git a/urcu/static/wfcqueue.h b/urcu/static/wfcqueue.h index 944ee88..1200227 100644 --- a/urcu/static/wfcqueue.h +++ b/urcu/static/wfcqueue.h @@ -2,12 +2,12 @@ #define _URCU_WFCQUEUE_STATIC_H /* - * wfcqueue-static.h + * urcu/static/wfcqueue.h * * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue * - * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfcqueue.h for linking - * dynamically with the userspace rcu library. + * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for + * linking dynamically with the userspace rcu library. * * Copyright 2010-2012 - Mathieu Desnoyers * Copyright 2011-2012 - Lai Jiangshan @@ -41,8 +41,10 @@ extern "C" { /* * Concurrent queue with wait-free enqueue/blocking dequeue. * - * Inspired from half-wait-free/half-blocking queue implementation done by - * Paul E. McKenney. + * This queue has been designed and implemented collaboratively by + * Mathieu Desnoyers and Lai Jiangshan. Inspired from + * half-wait-free/half-blocking queue implementation done by Paul E. + * McKenney. * * Mutual exclusion of __cds_wfcq_* API * @@ -57,6 +59,10 @@ extern "C" { * * For convenience, cds_wfcq_dequeue_blocking() and * cds_wfcq_splice_blocking() hold the dequeue lock. + * + * Besides locking, mutual exclusion of dequeue, splice and iteration + * can be ensured by performing all of those operations from a single + * thread, without requiring any lock. 
*/ #define WFCQ_ADAPT_ATTEMPTS 10 /* Retry if being set */ @@ -122,7 +128,7 @@ static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head, assert(!ret); } -static inline void ___cds_wfcq_append(struct cds_wfcq_head *head, +static inline bool ___cds_wfcq_append(struct cds_wfcq_head *head, struct cds_wfcq_tail *tail, struct cds_wfcq_node *new_head, struct cds_wfcq_node *new_tail) @@ -146,6 +152,11 @@ static inline void ___cds_wfcq_append(struct cds_wfcq_head *head, * perspective. */ CMM_STORE_SHARED(old_tail->next, new_head); + /* + * Return false if queue was empty prior to adding the node, + * else return true. + */ + return old_tail != &head->node; } /* @@ -153,19 +164,22 @@ static inline void ___cds_wfcq_append(struct cds_wfcq_head *head, * * Issues a full memory barrier before enqueue. No mutual exclusion is * required. + * + * Returns false if the queue was empty prior to adding the node. + * Returns true otherwise. */ -static inline void _cds_wfcq_enqueue(struct cds_wfcq_head *head, +static inline bool _cds_wfcq_enqueue(struct cds_wfcq_head *head, struct cds_wfcq_tail *tail, struct cds_wfcq_node *new_tail) { - ___cds_wfcq_append(head, tail, new_tail, new_tail); + return ___cds_wfcq_append(head, tail, new_tail, new_tail); } /* * Waiting for enqueuer to complete enqueue and return the next node. */ static inline struct cds_wfcq_node * -___cds_wfcq_node_sync_next(struct cds_wfcq_node *node) +___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking) { struct cds_wfcq_node *next; int attempt = 0; @@ -174,6 +188,8 @@ ___cds_wfcq_node_sync_next(struct cds_wfcq_node *node) * Adaptative busy-looping waiting for enqueuer to complete enqueue. 
*/ while ((next = CMM_LOAD_SHARED(node->next)) == NULL) { + if (!blocking) + return CDS_WFCQ_WOULDBLOCK; if (++attempt >= WFCQ_ADAPT_ATTEMPTS) { poll(NULL, 0, WFCQ_WAIT); /* Wait for 10ms */ attempt = 0; @@ -185,46 +201,59 @@ ___cds_wfcq_node_sync_next(struct cds_wfcq_node *node) return next; } -/* - * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing. - * - * Content written into the node before enqueue is guaranteed to be - * consistent, but no other memory ordering is ensured. - * Should be called with cds_wfcq_dequeue_lock() held. - * - * Used by for-like iteration macros in urcu/wfqueue.h: - * __cds_wfcq_for_each_blocking() - * __cds_wfcq_for_each_blocking_safe() - */ static inline struct cds_wfcq_node * -___cds_wfcq_first_blocking(struct cds_wfcq_head *head, - struct cds_wfcq_tail *tail) +___cds_wfcq_first(struct cds_wfcq_head *head, + struct cds_wfcq_tail *tail, + int blocking) { struct cds_wfcq_node *node; if (_cds_wfcq_empty(head, tail)) return NULL; - node = ___cds_wfcq_node_sync_next(&head->node); + node = ___cds_wfcq_node_sync_next(&head->node, blocking); /* Load head->node.next before loading node's content */ cmm_smp_read_barrier_depends(); return node; } /* - * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing. + * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing. * * Content written into the node before enqueue is guaranteed to be * consistent, but no other memory ordering is ensured. - * Should be called with cds_wfcq_dequeue_lock() held. + * Dequeue/splice/iteration mutual exclusion should be ensured by the + * caller. 
* * Used by for-like iteration macros in urcu/wfqueue.h: * __cds_wfcq_for_each_blocking() * __cds_wfcq_for_each_blocking_safe() */ static inline struct cds_wfcq_node * -___cds_wfcq_next_blocking(struct cds_wfcq_head *head, +___cds_wfcq_first_blocking(struct cds_wfcq_head *head, + struct cds_wfcq_tail *tail) +{ + return ___cds_wfcq_first(head, tail, 1); +} + + +/* + * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing. + * + * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if + * it needs to block. + */ +static inline struct cds_wfcq_node * +___cds_wfcq_first_nonblocking(struct cds_wfcq_head *head, + struct cds_wfcq_tail *tail) +{ + return ___cds_wfcq_first(head, tail, 0); +} + +static inline struct cds_wfcq_node * +___cds_wfcq_next(struct cds_wfcq_head *head, struct cds_wfcq_tail *tail, - struct cds_wfcq_node *node) + struct cds_wfcq_node *node, + int blocking) { struct cds_wfcq_node *next; @@ -239,7 +268,7 @@ ___cds_wfcq_next_blocking(struct cds_wfcq_head *head, cmm_smp_rmb(); if (CMM_LOAD_SHARED(tail->p) == node) return NULL; - next = ___cds_wfcq_node_sync_next(node); + next = ___cds_wfcq_node_sync_next(node, blocking); } /* Load node->next before loading next's content */ cmm_smp_read_barrier_depends(); @@ -247,28 +276,52 @@ ___cds_wfcq_next_blocking(struct cds_wfcq_head *head, } /* - * __cds_wfcq_dequeue_blocking: dequeue a node from the queue. - * - * No need to go on a waitqueue here, as there is no possible state in which the - * list could cause dequeue to busy-loop needlessly while waiting for another - * thread to be scheduled. The queue appears empty until tail->next is set by - * enqueue. + * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing. * * Content written into the node before enqueue is guaranteed to be * consistent, but no other memory ordering is ensured. - * It is valid to reuse and free a dequeued node immediately. - * Should be called with cds_wfcq_dequeue_lock() held. 
+ * Dequeue/splice/iteration mutual exclusion should be ensured by the
+ * caller.
+ *
+ * Used by for-like iteration macros in urcu/wfqueue.h:
+ * __cds_wfcq_for_each_blocking()
+ * __cds_wfcq_for_each_blocking_safe()
  */
 static inline struct cds_wfcq_node *
-___cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
-		struct cds_wfcq_tail *tail)
+___cds_wfcq_next_blocking(struct cds_wfcq_head *head,
+		struct cds_wfcq_tail *tail,
+		struct cds_wfcq_node *node)
+{
+	return ___cds_wfcq_next(head, tail, node, 1);
+}
+
+/*
+ * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
+ *
+ * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
+ * it needs to block.
+ */
+static inline struct cds_wfcq_node *
+___cds_wfcq_next_nonblocking(struct cds_wfcq_head *head,
+		struct cds_wfcq_tail *tail,
+		struct cds_wfcq_node *node)
+{
+	return ___cds_wfcq_next(head, tail, node, 0);
+}
+
+static inline struct cds_wfcq_node *
+___cds_wfcq_dequeue(struct cds_wfcq_head *head,
+		struct cds_wfcq_tail *tail,
+		int blocking)
 {
 	struct cds_wfcq_node *node, *next;
 
 	if (_cds_wfcq_empty(head, tail))
 		return NULL;
 
-	node = ___cds_wfcq_node_sync_next(&head->node);
+	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
+	if (!blocking && node == CDS_WFCQ_WOULDBLOCK)
+		return CDS_WFCQ_WOULDBLOCK;
 
 	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
 		/*
@@ -288,7 +341,16 @@ ___cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
 		_cds_wfcq_node_init(&head->node);
 		if (uatomic_cmpxchg(&tail->p, node, &head->node) == node)
 			return node;
-		next = ___cds_wfcq_node_sync_next(node);
+		next = ___cds_wfcq_node_sync_next(node, blocking);
+		/*
+		 * In nonblocking mode, if we would need to block to
+		 * get node's next, set the head next node pointer
+		 * (currently NULL) back to its original value. 
+ */ + if (!blocking && next == CDS_WFCQ_WOULDBLOCK) { + head->node.next = node; + return CDS_WFCQ_WOULDBLOCK; + } } /* @@ -302,25 +364,50 @@ ___cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head, } /* - * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q. + * __cds_wfcq_dequeue_blocking: dequeue a node from the queue. * - * Dequeue all nodes from src_q. - * dest_q must be already initialized. - * Should be called with cds_wfcq_dequeue_lock() held on src_q. + * Content written into the node before enqueue is guaranteed to be + * consistent, but no other memory ordering is ensured. + * It is valid to reuse and free a dequeued node immediately. + * Dequeue/splice/iteration mutual exclusion should be ensured by the + * caller. */ -static inline void -___cds_wfcq_splice_blocking( +static inline struct cds_wfcq_node * +___cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head, + struct cds_wfcq_tail *tail) +{ + return ___cds_wfcq_dequeue(head, tail, 1); +} + +/* + * __cds_wfcq_dequeue_nonblocking: dequeue a node from a wait-free queue. + * + * Same as __cds_wfcq_dequeue_blocking, but returns CDS_WFCQ_WOULDBLOCK + * if it needs to block. 
+ */ +static inline struct cds_wfcq_node * +___cds_wfcq_dequeue_nonblocking(struct cds_wfcq_head *head, + struct cds_wfcq_tail *tail) +{ + return ___cds_wfcq_dequeue(head, tail, 0); +} + +static inline enum cds_wfcq_ret +___cds_wfcq_splice( struct cds_wfcq_head *dest_q_head, struct cds_wfcq_tail *dest_q_tail, struct cds_wfcq_head *src_q_head, - struct cds_wfcq_tail *src_q_tail) + struct cds_wfcq_tail *src_q_tail, + int blocking) { struct cds_wfcq_node *head, *tail; if (_cds_wfcq_empty(src_q_head, src_q_tail)) - return; + return CDS_WFCQ_RET_SRC_EMPTY; - head = ___cds_wfcq_node_sync_next(&src_q_head->node); + head = ___cds_wfcq_node_sync_next(&src_q_head->node, blocking); + if (head == CDS_WFCQ_WOULDBLOCK) + return CDS_WFCQ_RET_WOULDBLOCK; _cds_wfcq_node_init(&src_q_head->node); /* @@ -335,7 +422,49 @@ ___cds_wfcq_splice_blocking( * Append the spliced content of src_q into dest_q. Does not * require mutual exclusion on dest_q (wait-free). */ - ___cds_wfcq_append(dest_q_head, dest_q_tail, head, tail); + if (___cds_wfcq_append(dest_q_head, dest_q_tail, head, tail)) + return CDS_WFCQ_RET_DEST_NON_EMPTY; + else + return CDS_WFCQ_RET_DEST_EMPTY; +} + + +/* + * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q. + * + * Dequeue all nodes from src_q. + * dest_q must be already initialized. + * Dequeue/splice/iteration mutual exclusion for src_q should be ensured + * by the caller. + * Returns enum cds_wfcq_ret which indicates the state of the src or + * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK. + */ +static inline enum cds_wfcq_ret +___cds_wfcq_splice_blocking( + struct cds_wfcq_head *dest_q_head, + struct cds_wfcq_tail *dest_q_tail, + struct cds_wfcq_head *src_q_head, + struct cds_wfcq_tail *src_q_tail) +{ + return ___cds_wfcq_splice(dest_q_head, dest_q_tail, + src_q_head, src_q_tail, 1); +} + +/* + * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q. 
+ *
+ * Same as __cds_wfcq_splice_blocking, but returns
+ * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
+ */
+static inline enum cds_wfcq_ret
+___cds_wfcq_splice_nonblocking(
+		struct cds_wfcq_head *dest_q_head,
+		struct cds_wfcq_tail *dest_q_tail,
+		struct cds_wfcq_head *src_q_head,
+		struct cds_wfcq_tail *src_q_tail)
+{
+	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
+			src_q_head, src_q_tail, 0);
 }
 
 /*
@@ -343,7 +472,7 @@ ___cds_wfcq_splice_blocking(
  *
  * Content written into the node before enqueue is guaranteed to be
  * consistent, but no other memory ordering is ensured.
- * Mutual exlusion with (and only with) cds_wfcq_splice_blocking is
+ * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is
  * ensured.
  * It is valid to reuse and free a dequeued node immediately.
  */
@@ -366,20 +495,25 @@ _cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
  * dest_q must be already initialized.
  * Content written into the node before enqueue is guaranteed to be
  * consistent, but no other memory ordering is ensured.
- * Mutual exlusion with (and only with) cds_wfcq_dequeue_blocking is
+ * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is
  * ensured.
+ * Returns enum cds_wfcq_ret which indicates the state of the src or
+ * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
  */
-static inline void
+static inline enum cds_wfcq_ret
 _cds_wfcq_splice_blocking(
 		struct cds_wfcq_head *dest_q_head,
 		struct cds_wfcq_tail *dest_q_tail,
 		struct cds_wfcq_head *src_q_head,
 		struct cds_wfcq_tail *src_q_tail)
 {
+	enum cds_wfcq_ret ret;
+
 	_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
-	___cds_wfcq_splice_blocking(dest_q_head, dest_q_tail,
+	ret = ___cds_wfcq_splice_blocking(dest_q_head, dest_q_tail,
 			src_q_head, src_q_tail);
 	_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
+	return ret;
 }
 
 #ifdef __cplusplus