#ifndef _URCU_WFQUEUE_STATIC_H
#define _URCU_WFQUEUE_STATIC_H

/*
 * wfqueue-static.h
 *
 * Userspace RCU library - Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfqueue.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <assert.h>
#include <poll.h>	/* poll() is used by the blocking dequeue below */
#include <urcu/compiler.h>
#include <urcu/uatomic_arch.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Queue with wait-free enqueue/blocking dequeue.
 * This implementation adds a dummy head node when the queue is empty to ensure
 * we can always update the queue locklessly.
 *
 * Inspired by the half-wait-free/half-blocking queue implementation done by
 * Paul E. McKenney.
 */

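/*
 * Example usage sketch (illustrative only, not part of this header).
 * "struct myitem", "myqueue", "producer", "consumer" and "handle_value()"
 * are hypothetical caller-side names; caa_container_of() is assumed to be
 * available from <urcu/compiler.h>, malloc()/free() from <stdlib.h>.
 * Enqueue may be called concurrently from any number of threads; the
 * locked dequeue wrapper defined below serializes dequeuers.
 *
 *	struct myitem {
 *		int value;
 *		struct cds_wfq_node node;
 *	};
 *
 *	struct cds_wfq_queue myqueue;	(initialize once with _cds_wfq_init)
 *
 *	void producer(int v)
 *	{
 *		struct myitem *item = malloc(sizeof(*item));
 *
 *		item->value = v;
 *		_cds_wfq_node_init(&item->node);
 *		_cds_wfq_enqueue(&myqueue, &item->node);
 *	}
 *
 *	void consumer(void)
 *	{
 *		struct cds_wfq_node *qnode;
 *		struct myitem *item;
 *
 *		qnode = _cds_wfq_dequeue_blocking(&myqueue);
 *		if (!qnode)
 *			return;		(queue only contained the dummy node)
 *		item = caa_container_of(qnode, struct myitem, node);
 *		handle_value(item->value);
 *		free(item);		(immediate free of a dequeued node is allowed)
 *	}
 */
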
#define WFQ_ADAPT_ATTEMPTS	10	/* Busy-loop attempts before blocking wait */
#define WFQ_WAIT		10	/* Block for 10 ms when an enqueue is still in progress */

void _cds_wfq_node_init(struct cds_wfq_node *node)
{
	node->next = NULL;
}

void _cds_wfq_init(struct cds_wfq_queue *q)
{
	int ret;

	_cds_wfq_node_init(&q->dummy);
	/* Set queue head and tail */
	q->head = &q->dummy;
	q->tail = &q->dummy.next;
	ret = pthread_mutex_init(&q->lock, NULL);
	assert(!ret);
}

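/*
 * Resulting layout (illustrative sketch): an empty queue contains only the
 * dummy node, and q->tail always points to the link field that the next
 * enqueue will fill in:
 *
 *	empty queue:		q->head -> dummy, dummy.next == NULL,
 *				q->tail == &q->dummy.next
 *
 *	after enqueueing A:	q->head -> dummy -> A, A.next == NULL,
 *				q->tail == &A.next
 *
 * This is why the empty-queue test in ___cds_wfq_dequeue_blocking() below
 * checks both that the head is the dummy node and that the tail still
 * points to &q->dummy.next (i.e. no enqueue is in flight).
 */
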
void _cds_wfq_enqueue(struct cds_wfq_queue *q, struct cds_wfq_node *node)
{
	struct cds_wfq_node **old_tail;

	/*
	 * uatomic_xchg() implicit memory barrier orders earlier stores to the
	 * data structure containing node (and the store setting node->next to
	 * NULL) before publication.
	 */
	old_tail = uatomic_xchg(&q->tail, &node->next);
	/*
	 * At this point, dequeuers see a NULL *old_tail (the previous tail
	 * link), which indicates that the queue is being appended to. The
	 * following store will append "node" to the queue from a dequeuer
	 * perspective.
	 */
	CMM_STORE_SHARED(*old_tail, node);
}

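/*
 * Illustrative interleaving of the enqueue fast path above (a sketch, using
 * hypothetical nodes A and B). Starting from an empty queue
 * (q->tail == &q->dummy.next, dummy.next == NULL), two concurrent enqueuers
 * E1 and E2 may interleave as follows:
 *
 *	E1: old_tail = uatomic_xchg(&q->tail, &A.next)	-> old_tail == &q->dummy.next
 *	E2: old_tail = uatomic_xchg(&q->tail, &B.next)	-> old_tail == &A.next
 *	E2: CMM_STORE_SHARED(*old_tail, &B)		-> A.next = &B
 *	E1: CMM_STORE_SHARED(*old_tail, &A)		-> dummy.next = &A
 *
 * The final list is dummy -> A -> B regardless of the order of the two
 * stores. Between E1's xchg and E1's store, dummy.next is still NULL even
 * though the tail has already moved: this is the transient window that the
 * busy-loop in ___cds_wfq_dequeue_blocking() below waits out. Each enqueuer
 * executes exactly one xchg and one store, so enqueue never blocks or
 * retries: it is wait-free.
 */
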
/*
 * It is valid to reuse and free a dequeued node immediately.
 *
 * No need to go on a waitqueue here, as there is no possible state in which the
 * list could cause dequeue to busy-loop needlessly while waiting for another
 * thread to be scheduled. The queue appears empty until tail->next is set by
 * enqueue.
 */
struct cds_wfq_node *
___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
{
	struct cds_wfq_node *node, *next;
	int attempt = 0;

	/*
	 * Queue is empty if it only contains the dummy node.
	 */
	if (q->head == &q->dummy && CMM_LOAD_SHARED(q->tail) == &q->dummy.next)
		return NULL;
	node = q->head;

	/*
	 * Adaptive busy-looping, waiting for the enqueuer to complete its
	 * enqueue.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
			poll(NULL, 0, WFQ_WAIT);	/* Wait for 10 ms */
			attempt = 0;
		} else
			caa_cpu_relax();
	}
	/*
	 * Move queue head forward.
	 */
	q->head = next;
	/*
	 * Requeue dummy node if we just dequeued it.
	 */
	if (node == &q->dummy) {
		_cds_wfq_node_init(node);
		_cds_wfq_enqueue(q, node);
		return ___cds_wfq_dequeue_blocking(q);
	}
	return node;
}

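/*
 * Illustrative trace of the dummy-requeue path above (sketch only), for a
 * queue whose head is the dummy node followed by a real node A:
 *
 *	1) node = q->head = &q->dummy; next = A; q->head = A.
 *	2) The dummy is re-initialized and re-enqueued, which appends it
 *	   after the current tail, e.g. ... -> A -> dummy.
 *	3) The recursive call then dequeues A (a real node) and returns it.
 *
 * The recursion is at most one level deep: the node following the dummy can
 * never be the dummy itself, so the recursive call cannot take the
 * dummy-requeue branch again.
 */
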
struct cds_wfq_node *
_cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
{
	struct cds_wfq_node *retnode;
	int ret;

	ret = pthread_mutex_lock(&q->lock);
	assert(!ret);
	retnode = ___cds_wfq_dequeue_blocking(q);
	ret = pthread_mutex_unlock(&q->lock);
	assert(!ret);
	return retnode;
}

#ifdef __cplusplus
}
#endif

#endif /* _URCU_WFQUEUE_STATIC_H */