X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fwfqueue-static.h;h=790931bef25db04d2156fe0cd39dd260b31a0474;hp=0f7e68f7a40fea14b208708ae9bd7425b79d35b2;hb=b57aee663af988b7f686c076ce6aef2a0d2487c8;hpb=b0dd35e28adda68dc6a564cf770a5d422d62259f

diff --git a/urcu/wfqueue-static.h b/urcu/wfqueue-static.h
index 0f7e68f..790931b 100644
--- a/urcu/wfqueue-static.h
+++ b/urcu/wfqueue-static.h
@@ -28,6 +28,7 @@
 
 #include <pthread.h>
 #include <assert.h>
+#include <poll.h>
 #include <urcu/compiler.h>
 #include <urcu/uatomic_arch.h>
 
@@ -47,16 +48,16 @@ extern "C" {
 #define WFQ_ADAPT_ATTEMPTS		10	/* Retry if being set */
 #define WFQ_WAIT			10	/* Wait 10 ms if being set */
 
-void _wfq_node_init(struct wfq_node *node)
+static inline void _cds_wfq_node_init(struct cds_wfq_node *node)
 {
 	node->next = NULL;
 }
 
-void _wfq_init(struct wfq_queue *q)
+static inline void _cds_wfq_init(struct cds_wfq_queue *q)
 {
 	int ret;
 
-	_wfq_node_init(&q->dummy);
+	_cds_wfq_node_init(&q->dummy);
 	/* Set queue head and tail */
 	q->head = &q->dummy;
 	q->tail = &q->dummy.next;
@@ -64,9 +65,10 @@ void _wfq_init(struct wfq_queue *q)
 	assert(!ret);
 }
 
-void _wfq_enqueue(struct wfq_queue *q, struct wfq_node *node)
+static inline void _cds_wfq_enqueue(struct cds_wfq_queue *q,
+		struct cds_wfq_node *node)
 {
-	struct wfq_node **old_tail;
+	struct cds_wfq_node **old_tail;
 
 	/*
 	 * uatomic_xchg() implicit memory barrier orders earlier stores to data
@@ -79,7 +81,7 @@ void _wfq_enqueue(struct wfq_queue *q, struct wfq_node *node)
 	 * that the queue is being appended to. The following store will append
 	 * "node" to the queue from a dequeuer perspective.
 	 */
-	STORE_SHARED(*old_tail, node);
+	CMM_STORE_SHARED(*old_tail, node);
 }
 
 /*
@@ -90,28 +92,28 @@ void _wfq_enqueue(struct wfq_queue *q, struct wfq_node *node)
  * thread to be scheduled. The queue appears empty until tail->next is set by
  * enqueue.
  */
-struct wfq_node *
-___wfq_dequeue_blocking(struct wfq_queue *q)
+static inline struct cds_wfq_node *
+___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
 {
-	struct wfq_node *node, *next;
+	struct cds_wfq_node *node, *next;
 	int attempt = 0;
 
 	/*
 	 * Queue is empty if it only contains the dummy node.
 	 */
-	if (q->head == &q->dummy && LOAD_SHARED(q->tail) == &q->dummy.next)
+	if (q->head == &q->dummy && CMM_LOAD_SHARED(q->tail) == &q->dummy.next)
 		return NULL;
 	node = q->head;
 
 	/*
 	 * Adaptative busy-looping waiting for enqueuer to complete enqueue.
 	 */
-	while ((next = LOAD_SHARED(node->next)) == NULL) {
+	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
 		if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
 			poll(NULL, 0, WFQ_WAIT);	/* Wait for 10ms */
 			attempt = 0;
 		} else
-			cpu_relax();
+			caa_cpu_relax();
 	}
 	/*
 	 * Move queue head forward.
@@ -121,22 +123,22 @@ ___wfq_dequeue_blocking(struct wfq_queue *q)
 	 * Requeue dummy node if we just dequeued it.
 	 */
 	if (node == &q->dummy) {
-		_wfq_node_init(node);
-		_wfq_enqueue(q, node);
-		return ___wfq_dequeue_blocking(q);
+		_cds_wfq_node_init(node);
+		_cds_wfq_enqueue(q, node);
+		return ___cds_wfq_dequeue_blocking(q);
 	}
 	return node;
 }
 
-struct wfq_node *
-_wfq_dequeue_blocking(struct wfq_queue *q)
+static inline struct cds_wfq_node *
+_cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
 {
-	struct wfq_node *retnode;
+	struct cds_wfq_node *retnode;
 	int ret;
 
 	ret = pthread_mutex_lock(&q->lock);
 	assert(!ret);
-	retnode = ___wfq_dequeue_blocking(q);
+	retnode = ___cds_wfq_dequeue_blocking(q);
 	ret = pthread_mutex_unlock(&q->lock);
 	assert(!ret);
 	return retnode;
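
In short, the patch makes the queue primitives static inline (so the header can be included from more than one translation unit), renames the wfq_* types and functions to the cds_ data-structure namespace (struct cds_wfq_node, struct cds_wfq_queue, _cds_wfq_*), and switches to the namespaced memory-access and arch helpers (STORE_SHARED -> CMM_STORE_SHARED, LOAD_SHARED -> CMM_LOAD_SHARED, cpu_relax -> caa_cpu_relax).

The following minimal sketch shows how the renamed queue is typically used through the public wrappers declared in urcu/wfqueue.h (cds_wfq_init, cds_wfq_node_init, cds_wfq_enqueue, cds_wfq_dequeue_blocking). It is not part of the patch: the struct myelem wrapper, its fields, and the single-threaded driver are illustrative assumptions, and it assumes linking against the userspace RCU library that provides these wrappers.

/*
 * Hypothetical usage sketch: enqueue a few elements, then drain the queue
 * with the blocking dequeue.  "struct myelem" is an assumption for
 * illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu/wfqueue.h>
#include <urcu/compiler.h>	/* caa_container_of() */

struct myelem {
	int value;
	struct cds_wfq_node node;	/* embedded queue node */
};

int main(void)
{
	struct cds_wfq_queue q;
	struct cds_wfq_node *qnode;
	int i;

	cds_wfq_init(&q);

	/* Enqueue is wait-free and may be called concurrently from many threads. */
	for (i = 0; i < 3; i++) {
		struct myelem *e = malloc(sizeof(*e));

		if (!e)
			abort();
		e->value = i;
		cds_wfq_node_init(&e->node);
		cds_wfq_enqueue(&q, &e->node);
	}

	/*
	 * Dequeue takes q->lock and may wait briefly for a concurrent
	 * enqueuer to publish node->next; it returns NULL when only the
	 * dummy node remains (queue empty).
	 */
	while ((qnode = cds_wfq_dequeue_blocking(&q)) != NULL) {
		struct myelem *e = caa_container_of(qnode, struct myelem, node);

		printf("dequeued %d\n", e->value);
		free(e);
	}
	return 0;
}

As the patched code shows, the enqueue side stays wait-free (a single uatomic_xchg() on the tail followed by a CMM_STORE_SHARED of the link), while dequeuers serialize on q->lock and adaptively busy-wait (caa_cpu_relax(), then poll()) until the enqueuer's store to node->next becomes visible.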