#ifndef _URCU_WFCQUEUE_STATIC_H
#define _URCU_WFCQUEUE_STATIC_H

/*
 * urcu/static/wfcqueue.h
 *
 * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for
 * linking dynamically with the userspace rcu library.
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011-2012 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <assert.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Concurrent queue with wait-free enqueue/blocking dequeue.
 *
 * This queue has been designed and implemented collaboratively by
 * Mathieu Desnoyers and Lai Jiangshan. Inspired by the
 * half-wait-free/half-blocking queue implementation done by Paul E.
 * McKenney.
 *
 * Mutual exclusion of cds_wfcq_* / __cds_wfcq_* API
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 * Legend:
 * [1] cds_wfcq_enqueue
 * [2] __cds_wfcq_splice (destination queue)
 * [3] __cds_wfcq_dequeue
 * [4] __cds_wfcq_splice (source queue)
 * [5] __cds_wfcq_first
 * [6] __cds_wfcq_next
 *
 *     [1] [2] [3] [4] [5] [6]
 * [1]  -   -   -   -   -   -
 * [2]  -   -   -   -   -   -
 * [3]  -   -   X   X   X   X
 * [4]  -   -   X   -   X   X
 * [5]  -   -   X   X   -   -
 * [6]  -   -   X   X   -   -
 *
 * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock().
 *
 * For convenience, cds_wfcq_dequeue_blocking() and
 * cds_wfcq_splice_blocking() hold the dequeue lock.
 *
 * Besides locking, mutual exclusion of dequeue, splice and iteration
 * can be ensured by performing all of those operations from a single
 * thread, without requiring any lock.
 */

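/*
 * Example usage (an illustrative sketch only, written against the public
 * wrappers declared in urcu/wfcqueue.h; "struct my_job" and the
 * surrounding functions are hypothetical application code):
 *
 *	struct my_job {
 *		int id;
 *		struct cds_wfcq_node qnode;
 *	};
 *
 *	static struct cds_wfcq_head q_head;
 *	static struct cds_wfcq_tail q_tail;
 *
 *	void my_setup(void)
 *	{
 *		cds_wfcq_init(&q_head, &q_tail);
 *	}
 *
 *	// Any number of threads may enqueue concurrently, without locking.
 *	void my_produce(struct my_job *job)
 *	{
 *		cds_wfcq_node_init(&job->qnode);
 *		cds_wfcq_enqueue(&q_head, &q_tail, &job->qnode);
 *	}
 *
 *	// Takes the dequeue lock internally; returns NULL when empty.
 *	struct my_job *my_consume(void)
 *	{
 *		struct cds_wfcq_node *qnode;
 *
 *		qnode = cds_wfcq_dequeue_blocking(&q_head, &q_tail);
 *		if (!qnode)
 *			return NULL;
 *		return caa_container_of(qnode, struct my_job, qnode);
 *	}
 */
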
#define WFCQ_ADAPT_ATTEMPTS	10	/* Retry if being set */
#define WFCQ_WAIT		10	/* Wait 10 ms if being set */

/*
 * cds_wfcq_node_init: initialize wait-free queue node.
 */
static inline void _cds_wfcq_node_init(struct cds_wfcq_node *node)
{
	node->next = NULL;
}

/*
 * cds_wfcq_init: initialize wait-free queue (with lock). Pair with
 * cds_wfcq_destroy().
 */
static inline void _cds_wfcq_init(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
	ret = pthread_mutex_init(&head->lock, NULL);
	assert(!ret);
}

/*
 * cds_wfcq_destroy: destroy wait-free queue (with lock). Pair with
 * cds_wfcq_init().
 */
static inline void _cds_wfcq_destroy(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret = pthread_mutex_destroy(&head->lock);
	assert(!ret);
}

/*
 * __cds_wfcq_init: initialize wait-free queue (without lock). Don't
 * pair with any destroy function.
 */
static inline void ___cds_wfcq_init(struct __cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
}

/*
 * cds_wfcq_empty: return whether wait-free queue is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 *
 * We perform the test on head->node.next to check if the queue is
 * possibly empty, but we confirm this by checking if the tail pointer
 * points to the head node because the tail pointer is the linearisation
 * point of the enqueuers. Just checking the head next pointer could
 * make a queue appear empty if an enqueuer is preempted for a long time
 * between xchg() and setting the previous node's next pointer.
 */
static inline bool _cds_wfcq_empty(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	/*
	 * Queue is empty if no node is pointed to by head->node.next nor
	 * tail->p. Even though the tail->p check is sufficient to find
	 * out if the queue is empty, we first check head->node.next as a
	 * common case to ensure that dequeuers do not frequently access
	 * the enqueuers' tail->p cache line.
	 */
	return CMM_LOAD_SHARED(head->node.next) == NULL
		&& CMM_LOAD_SHARED(tail->p) == &head->node;
}

static inline void _cds_wfcq_dequeue_lock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_lock(&head->lock);
	assert(!ret);
}

static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_unlock(&head->lock);
	assert(!ret);
}

static inline bool ___cds_wfcq_append(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_head,
		struct cds_wfcq_node *new_tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *old_tail;

	/*
	 * Implicit memory barrier before uatomic_xchg() orders earlier
	 * stores to data structure containing node and setting
	 * node->next to NULL before publication.
	 */
	old_tail = uatomic_xchg(&tail->p, new_tail);

	/*
	 * Implicit memory barrier after uatomic_xchg() orders store to
	 * q->tail before store to old_tail->next.
	 *
	 * At this point, dequeuers see a NULL tail->p->next, which
	 * indicates that the queue is being appended to. The following
	 * store will append "node" to the queue from a dequeuer
	 * perspective.
	 */
	CMM_STORE_SHARED(old_tail->next, new_head);
	/*
	 * Return false if queue was empty prior to adding the node,
	 * else return true.
	 */
	return old_tail != &head->node;
}

/*
 * cds_wfcq_enqueue: enqueue a node into a wait-free queue.
 *
 * Issues a full memory barrier before enqueue. No mutual exclusion is
 * required.
 *
 * Returns false if the queue was empty prior to adding the node.
 * Returns true otherwise.
 */
static inline bool _cds_wfcq_enqueue(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_tail)
{
	return ___cds_wfcq_append(head, tail, new_tail, new_tail);
}

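/*
 * Example (illustrative sketch): the boolean returned by the enqueue
 * operation lets a producer wake a consumer only on the empty to
 * non-empty transition; wake_up_consumer() is a hypothetical
 * application notification (e.g. condition variable or futex wake-up):
 *
 *	cds_wfcq_node_init(&job->qnode);
 *	if (!cds_wfcq_enqueue(&q_head, &q_tail, &job->qnode))
 *		wake_up_consumer();	// queue was empty before this enqueue
 */
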
/*
 * CDS_WFCQ_WAIT_SLEEP:
 *
 * By default, this sleeps for the given @msec milliseconds.
 * This is a macro which LGPL users may #define themselves before
 * including wfcqueue.h to override the default behavior (e.g.
 * to log a warning or perform other background work).
 */
#ifndef CDS_WFCQ_WAIT_SLEEP
#define CDS_WFCQ_WAIT_SLEEP(msec) ___cds_wfcq_wait_sleep(msec)
#endif

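/*
 * Example override (illustrative sketch): an LGPL user can supply its
 * own wait hook by defining the macro before including urcu/wfcqueue.h;
 * my_queue_wait_hook() is a hypothetical application function:
 *
 *	#define CDS_WFCQ_WAIT_SLEEP(msec)	my_queue_wait_hook(msec)
 *	#include <urcu/wfcqueue.h>
 */
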
static inline void ___cds_wfcq_wait_sleep(int msec)
{
	(void) poll(NULL, 0, msec);
}

/*
 * ___cds_wfcq_busy_wait: adaptive busy-wait.
 *
 * Returns 1 if nonblocking and needs to block, 0 otherwise.
 */
static inline bool
___cds_wfcq_busy_wait(int *attempt, int blocking)
{
	if (!blocking)
		return 1;
	if (++(*attempt) >= WFCQ_ADAPT_ATTEMPTS) {
		CDS_WFCQ_WAIT_SLEEP(WFCQ_WAIT);	/* Wait for 10ms */
		*attempt = 0;
	} else {
		caa_cpu_relax();
	}
	return 0;
}

/*
 * Waiting for enqueuer to complete enqueue and return the next node.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
{
	struct cds_wfcq_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping waiting for enqueuer to complete enqueue.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_WOULDBLOCK;
	}

	return next;
}

static inline struct cds_wfcq_node *
___cds_wfcq_first(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail))
		return NULL;
	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	/* Load head->node.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}

/*
 * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if queue is empty, first node otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 1);
}

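/*
 * Example iteration (illustrative sketch): walking the queue without
 * dequeuing, either from a single reader thread or with the dequeue
 * lock held, using the macros listed above; "struct my_job" and
 * do_something() are hypothetical:
 *
 *	struct cds_wfcq_node *qnode;
 *
 *	__cds_wfcq_for_each_blocking(&q_head, &q_tail, qnode) {
 *		struct my_job *job =
 *			caa_container_of(qnode, struct my_job, qnode);
 *		do_something(job);
 *	}
 */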

/*
 * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 0);
}

static inline struct cds_wfcq_node *
___cds_wfcq_next(cds_wfcq_head_ptr_t head __attribute__((unused)),
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node,
		int blocking)
{
	struct cds_wfcq_node *next;

	/*
	 * Even though the following tail->p check is sufficient to find
	 * out if we reached the end of the queue, we first check
	 * node->next as a common case to ensure that iteration on nodes
	 * does not frequently access the enqueuers' tail->p cache line.
	 */
	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/* Load node->next before tail->p */
		cmm_smp_rmb();
		if (CMM_LOAD_SHARED(tail->p) == node)
			return NULL;
		next = ___cds_wfcq_node_sync_next(node, blocking);
	}
	/* Load node->next before loading next's content */
	cmm_smp_read_barrier_depends();
	return next;
}

/*
 * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if reached end of queue, non-NULL next queue node
 * otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 1);
}

/*
 * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 0);
}

static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int *state,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node, *next;

	if (state)
		*state = 0;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail)) {
		return NULL;
	}

	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	if (!blocking && node == CDS_WFCQ_WOULDBLOCK) {
		return CDS_WFCQ_WOULDBLOCK;
	}

	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/*
		 * @node is probably the only node in the queue.
		 * Try to move the tail to &q->head.
		 * q->head.next is set to NULL here, and stays
		 * NULL if the cmpxchg succeeds. Should the
		 * cmpxchg fail due to a concurrent enqueue, the
		 * q->head.next will be set to the next node.
		 * The implicit memory barrier before
		 * uatomic_cmpxchg() orders load node->next
		 * before loading q->tail.
		 * The implicit memory barrier before uatomic_cmpxchg
		 * orders load q->head.next before loading node's
		 * content.
		 */
		_cds_wfcq_node_init(&head->node);
		if (uatomic_cmpxchg(&tail->p, node, &head->node) == node) {
			if (state)
				*state |= CDS_WFCQ_STATE_LAST;
			return node;
		}
		next = ___cds_wfcq_node_sync_next(node, blocking);
		/*
		 * In nonblocking mode, if we would need to block to
		 * get node's next, set the head next node pointer
		 * (currently NULL) back to its original value.
		 */
		if (!blocking && next == CDS_WFCQ_WOULDBLOCK) {
			head->node.next = node;
			return CDS_WFCQ_WOULDBLOCK;
		}
	}

	/*
	 * Move queue head forward.
	 */
	head->node.next = next;

	/* Load q->head.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}

/*
 * __cds_wfcq_dequeue_with_state_blocking: dequeue node from queue, with state.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * It is valid to reuse and free a dequeued node immediately.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 1);
}

/*
 * ___cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * __cds_wfcq_dequeue_with_state_nonblocking: dequeue node, with state.
 *
 * Same as __cds_wfcq_dequeue_with_state_blocking, but returns
 * CDS_WFCQ_WOULDBLOCK if it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 0);
}

/*
 * ___cds_wfcq_dequeue_nonblocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_nonblocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_nonblocking(head, tail, NULL);
}

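/*
 * Example (illustrative sketch): a nonblocking dequeue attempt must be
 * prepared to see CDS_WFCQ_WOULDBLOCK when an enqueuer has not yet
 * finished publishing a node; the mutual exclusion rules are the same
 * as for the blocking dequeue:
 *
 *	struct cds_wfcq_node *qnode;
 *
 *	qnode = __cds_wfcq_dequeue_nonblocking(&q_head, &q_tail);
 *	if (qnode == CDS_WFCQ_WOULDBLOCK) {
 *		// enqueue in progress: retry later or do other work
 *	} else if (qnode) {
 *		// got a node
 *	} else {
 *		// queue was empty
 *	}
 */
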
/*
 * __cds_wfcq_splice: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice(
		cds_wfcq_head_ptr_t u_dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t u_src_q_head,
		struct cds_wfcq_tail *src_q_tail,
		int blocking)
{
	struct __cds_wfcq_head *dest_q_head = u_dest_q_head._h;
	struct __cds_wfcq_head *src_q_head = u_src_q_head._h;
	struct cds_wfcq_node *head, *tail;
	int attempt = 0;

	/*
	 * Initial emptiness check to speed up cases where queue is
	 * empty: only require loads to check if queue is empty.
	 */
	if (_cds_wfcq_empty(__cds_wfcq_head_cast(src_q_head), src_q_tail))
		return CDS_WFCQ_RET_SRC_EMPTY;

	for (;;) {
		/*
		 * Open-coded _cds_wfcq_empty() by testing result of
		 * uatomic_xchg, as well as tail pointer vs head node
		 * address.
		 */
		head = uatomic_xchg(&src_q_head->node.next, NULL);
		if (head)
			break;	/* non-empty */
		if (CMM_LOAD_SHARED(src_q_tail->p) == &src_q_head->node)
			return CDS_WFCQ_RET_SRC_EMPTY;
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_RET_WOULDBLOCK;
	}

	/*
	 * Memory barrier implied before uatomic_xchg() orders store to
	 * src_q->head before store to src_q->tail. This is required by
	 * concurrent enqueue on src_q, which exchanges the tail before
	 * updating the previous tail's next pointer.
	 */
	tail = uatomic_xchg(&src_q_tail->p, &src_q_head->node);

	/*
	 * Append the spliced content of src_q into dest_q. Does not
	 * require mutual exclusion on dest_q (wait-free).
	 */
	if (___cds_wfcq_append(__cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			head, tail))
		return CDS_WFCQ_RET_DEST_NON_EMPTY;
	else
		return CDS_WFCQ_RET_DEST_EMPTY;
}

/*
 * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_blocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 1);
}

/*
 * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Same as __cds_wfcq_splice_blocking, but returns
 * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_nonblocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 0);
}

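/*
 * Example (illustrative sketch): a consumer can grab every queued node
 * with a single splice, then iterate the local queue with no further
 * synchronization; "struct my_job" and process() are hypothetical:
 *
 *	struct cds_wfcq_head local_head;
 *	struct cds_wfcq_tail local_tail;
 *	struct cds_wfcq_node *qnode, *next;
 *
 *	cds_wfcq_init(&local_head, &local_tail);
 *	if (cds_wfcq_splice_blocking(&local_head, &local_tail,
 *			&src_head, &src_tail) != CDS_WFCQ_RET_SRC_EMPTY) {
 *		__cds_wfcq_for_each_blocking_safe(&local_head, &local_tail,
 *				qnode, next) {
 *			struct my_job *job =
 *				caa_container_of(qnode, struct my_job, qnode);
 *			process(job);
 *		}
 *	}
 *	cds_wfcq_destroy(&local_head, &local_tail);
 */
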
/*
 * cds_wfcq_dequeue_with_state_blocking: dequeue a node from a wait-free queue.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is
 * ensured.
 * It is valid to reuse and free a dequeued node immediately.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int *state)
{
	struct cds_wfcq_node *retval;

	_cds_wfcq_dequeue_lock(head, tail);
	retval = ___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_cast(head),
			tail, state);
	_cds_wfcq_dequeue_unlock(head, tail);
	return retval;
}

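/*
 * Example (illustrative sketch): the state output reports whether the
 * dequeued node was the last one in the queue, which can be used to
 * disarm a "work pending" indicator; notify_queue_empty() is a
 * hypothetical application function:
 *
 *	int state;
 *	struct cds_wfcq_node *qnode;
 *
 *	qnode = cds_wfcq_dequeue_with_state_blocking(&q_head, &q_tail, &state);
 *	if (qnode && (state & CDS_WFCQ_STATE_LAST))
 *		notify_queue_empty();
 */
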
/*
 * cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return _cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is
 * ensured.
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
_cds_wfcq_splice_blocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	enum cds_wfcq_ret ret;

	_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
	ret = ___cds_wfcq_splice_blocking(cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			cds_wfcq_head_cast(src_q_head), src_q_tail);
	_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
	return ret;
}

#ifdef __cplusplus
}
#endif

#endif /* _URCU_WFCQUEUE_STATIC_H */