// SPDX-FileCopyrightText: 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_WFQUEUE_STATIC_H
#define _URCU_WFQUEUE_STATIC_H

/*
 * Userspace RCU library - Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfqueue.h for linking
 * dynamically with the userspace rcu library.
 */

#include <pthread.h>
#include <poll.h>
#include <urcu/assert.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Queue with wait-free enqueue/blocking dequeue.
 * This implementation keeps a dummy node in the queue at all times,
 * re-enqueuing it whenever it is dequeued, so the queue is never
 * physically empty and can always be updated locklessly.
 *
 * Inspired by the half-wait-free/half-blocking queue implementation by
 * Paul E. McKenney.
 */
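
/*
 * Minimal setup sketch (illustrative only; "struct my_item", its "value"
 * field and "myqueue" are hypothetical caller-side names, not part of this
 * API). Callers embed a struct cds_wfq_node in their own structure and
 * initialize the queue once before use:
 *
 *	struct my_item {
 *		int value;
 *		struct cds_wfq_node node;
 *	};
 *
 *	struct cds_wfq_queue myqueue;
 *
 *	_cds_wfq_init(&myqueue);
 */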

#define WFQ_ADAPT_ATTEMPTS	10	/* Busy-loop attempts before sleeping */
#define WFQ_WAIT		10	/* Sleep 10 ms between busy-loop rounds */

static inline void _cds_wfq_node_init(struct cds_wfq_node *node)
{
	node->next = NULL;
}

static inline void _cds_wfq_init(struct cds_wfq_queue *q)
{
	int ret;

	_cds_wfq_node_init(&q->dummy);
	/* Set queue head and tail */
	q->head = &q->dummy;
	q->tail = &q->dummy.next;
	ret = pthread_mutex_init(&q->lock, NULL);
	urcu_posix_assert(!ret);
}

static inline void _cds_wfq_destroy(struct cds_wfq_queue *q)
{
	int ret = pthread_mutex_destroy(&q->lock);
	urcu_posix_assert(!ret);
}

static inline void _cds_wfq_enqueue(struct cds_wfq_queue *q,
		struct cds_wfq_node *node)
{
	struct cds_wfq_node **old_tail;

	/*
	 * The implicit memory barrier of uatomic_xchg() orders earlier stores
	 * to the data structure containing "node", and the setting of
	 * node->next to NULL, before the node's publication.
	 */
	cmm_emit_legacy_smp_mb();
	old_tail = uatomic_xchg_mo(&q->tail, &node->next, CMM_SEQ_CST);
	/*
	 * At this point, dequeuers see a NULL old_tail->next, which indicates
	 * that the queue is being appended to. The following store appends
	 * "node" to the queue from a dequeuer's perspective.
	 */
	uatomic_store(old_tail, node, CMM_RELEASE);
}
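
/*
 * Enqueue usage sketch (illustrative; reuses the hypothetical "struct
 * my_item" and "myqueue" names from the setup sketch above, with error
 * handling omitted for brevity). Enqueue is wait-free: it never blocks and
 * never waits for concurrent enqueuers or dequeuers.
 *
 *	struct my_item *item = malloc(sizeof(*item));
 *
 *	item->value = 42;
 *	_cds_wfq_node_init(&item->node);
 *	_cds_wfq_enqueue(&myqueue, &item->node);
 */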

/*
 * Wait for the enqueuer to complete the enqueue, then return the next node.
 */
static inline struct cds_wfq_node *
___cds_wfq_node_sync_next(struct cds_wfq_node *node)
{
	struct cds_wfq_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping, waiting for the enqueuer to complete the
	 * enqueue. The CMM_CONSUME load pairs with the CMM_RELEASE store in
	 * _cds_wfq_enqueue().
	 */
	while ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
		if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
			(void) poll(NULL, 0, WFQ_WAIT);	/* Wait for 10 ms */
			attempt = 0;
		} else
			caa_cpu_relax();
	}

	return next;
}

/*
 * It is valid to reuse and free a dequeued node immediately.
 *
 * Dequeuers are serialized by the caller: _cds_wfq_dequeue_blocking()
 * holds q->lock across this function.
 *
 * No need to go on a waitqueue here: there is no state in which the queue
 * could cause dequeue to busy-loop needlessly while waiting for another
 * thread to be scheduled. The queue appears empty until tail->next is set
 * by enqueue.
 */
static inline struct cds_wfq_node *
___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
{
	struct cds_wfq_node *node, *next;

	/*
	 * Queue is empty if it only contains the dummy node.
	 */
	if (q->head == &q->dummy &&
	    uatomic_load(&q->tail, CMM_CONSUME) == &q->dummy.next)
		return NULL;
	node = q->head;

	next = ___cds_wfq_node_sync_next(node);

	/*
	 * Move queue head forward.
	 */
	q->head = next;
	/*
	 * Requeue dummy node if we just dequeued it.
	 */
	if (node == &q->dummy) {
		_cds_wfq_node_init(node);
		_cds_wfq_enqueue(q, node);
		return ___cds_wfq_dequeue_blocking(q);
	}
	return node;
}

static inline struct cds_wfq_node *
_cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
{
	struct cds_wfq_node *retnode;
	int ret;

	ret = pthread_mutex_lock(&q->lock);
	urcu_posix_assert(!ret);
	retnode = ___cds_wfq_dequeue_blocking(q);
	ret = pthread_mutex_unlock(&q->lock);
	urcu_posix_assert(!ret);
	return retnode;
}
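
/*
 * Dequeue usage sketch (illustrative; reuses the hypothetical "struct
 * my_item" and "myqueue" names from the sketches above). The enclosing
 * structure is recovered with caa_container_of(), and may be reused or
 * freed as soon as the node is returned. Once drained and no longer in
 * use, the queue is torn down with _cds_wfq_destroy():
 *
 *	struct cds_wfq_node *qnode;
 *	struct my_item *item;
 *
 *	qnode = _cds_wfq_dequeue_blocking(&myqueue);
 *	if (qnode) {
 *		item = caa_container_of(qnode, struct my_item, node);
 *		free(item);
 *	}
 *
 *	_cds_wfq_destroy(&myqueue);
 */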

#ifdef __cplusplus
}
#endif

#endif /* _URCU_WFQUEUE_STATIC_H */