Commit | Line | Data |
---|---|---|
8ad4ce58 MD |
1 | #ifndef _URCU_WFCQUEUE_STATIC_H |
2 | #define _URCU_WFCQUEUE_STATIC_H | |
3 | ||
4 | /* | |
47215721 | 5 | * urcu/static/wfcqueue.h |
8ad4ce58 MD |
6 | * |
7 | * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue | |
8 | * | |
07c2a4fd MD |
9 | * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for |
10 | * linking dynamically with the userspace rcu library. | |
8ad4ce58 MD |
11 | * |
12 | * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
13 | * Copyright 2011-2012 - Lai Jiangshan <laijs@cn.fujitsu.com> | |
14 | * | |
15 | * This library is free software; you can redistribute it and/or | |
16 | * modify it under the terms of the GNU Lesser General Public | |
17 | * License as published by the Free Software Foundation; either | |
18 | * version 2.1 of the License, or (at your option) any later version. | |
19 | * | |
20 | * This library is distributed in the hope that it will be useful, | |
21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
23 | * Lesser General Public License for more details. | |
24 | * | |
25 | * You should have received a copy of the GNU Lesser General Public | |
26 | * License along with this library; if not, write to the Free Software | |
27 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
28 | */ | |
29 | ||
30 | #include <pthread.h> | |
31 | #include <assert.h> | |
32 | #include <poll.h> | |
33 | #include <stdbool.h> | |
34 | #include <urcu/compiler.h> | |
35 | #include <urcu/uatomic.h> | |
36 | ||
37 | #ifdef __cplusplus | |
38 | extern "C" { | |
39 | #endif | |
40 | ||
41 | /* | |
42 | * Concurrent queue with wait-free enqueue/blocking dequeue. | |
43 | * | |
ebfd2673 MD |
44 | * This queue has been designed and implemented collaboratively by |
45 | * Mathieu Desnoyers and Lai Jiangshan. Inspired from | |
46 | * half-wait-free/half-blocking queue implementation done by Paul E. | |
47 | * McKenney. | |
8ad4ce58 | 48 | * |
f878b49e MD |
49 | * Mutual exclusion of cds_wfcq_* / __cds_wfcq_* API |
50 | * | |
51 | * Synchronization table: | |
52 | * | |
53 | * External synchronization techniques described in the API below is | |
54 | * required between pairs marked with "X". No external synchronization | |
55 | * required between pairs marked with "-". | |
56 | * | |
57 | * Legend: | |
58 | * [1] cds_wfcq_enqueue | |
59 | * [2] __cds_wfcq_splice (destination queue) | |
60 | * [3] __cds_wfcq_dequeue | |
61 | * [4] __cds_wfcq_splice (source queue) | |
62 | * [5] __cds_wfcq_first | |
63 | * [6] __cds_wfcq_next | |
64 | * | |
65 | * [1] [2] [3] [4] [5] [6] | |
66 | * [1] - - - - - - | |
67 | * [2] - - - - - - | |
68 | * [3] - - X X X X | |
69 | * [4] - - X - X X | |
70 | * [5] - - X X - - | |
71 | * [6] - - X X - - | |
8ad4ce58 | 72 | * |
8ad4ce58 MD |
73 | * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock(). |
74 | * | |
75 | * For convenience, cds_wfcq_dequeue_blocking() and | |
76 | * cds_wfcq_splice_blocking() hold the dequeue lock. | |
1fe734e1 MD |
77 | * |
78 | * Besides locking, mutual exclusion of dequeue, splice and iteration | |
79 | * can be ensured by performing all of those operations from a single | |
80 | * thread, without requiring any lock. | |
8ad4ce58 MD |
81 | */ |
82 | ||
83 | #define WFCQ_ADAPT_ATTEMPTS 10 /* Retry if being set */ | |
84 | #define WFCQ_WAIT 10 /* Wait 10 ms if being set */ | |
85 | ||
/*
 * cds_wfcq_node_init: initialize wait-free queue node.
 */
static inline void _cds_wfcq_node_init(struct cds_wfcq_node *node)
{
	/* A NULL next pointer marks this node as the last one in a queue. */
	node->next = NULL;
}
93 | ||
94 | /* | |
95 | * cds_wfcq_init: initialize wait-free queue. | |
96 | */ | |
97 | static inline void _cds_wfcq_init(struct cds_wfcq_head *head, | |
98 | struct cds_wfcq_tail *tail) | |
99 | { | |
100 | int ret; | |
101 | ||
102 | /* Set queue head and tail */ | |
103 | _cds_wfcq_node_init(&head->node); | |
104 | tail->p = &head->node; | |
105 | ret = pthread_mutex_init(&head->lock, NULL); | |
106 | assert(!ret); | |
107 | } | |
108 | ||
f637f191 MD |
109 | /* |
110 | * __cds_wfcq_init: initialize wait-free queue. | |
111 | */ | |
112 | static inline void ___cds_wfcq_init(struct __cds_wfcq_head *head, | |
113 | struct cds_wfcq_tail *tail) | |
114 | { | |
115 | /* Set queue head and tail */ | |
116 | _cds_wfcq_node_init(&head->node); | |
117 | tail->p = &head->node; | |
118 | } | |
119 | ||
8ad4ce58 MD |
/*
 * cds_wfcq_empty: return whether wait-free queue is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 *
 * We perform the test on head->node.next to check if the queue is
 * possibly empty, but we confirm this by checking if the tail pointer
 * points to the head node because the tail pointer is the linearisation
 * point of the enqueuers. Just checking the head next pointer could
 * make a queue appear empty if an enqueuer is preempted for a long time
 * between xchg() and setting the previous node's next pointer.
 */
static inline bool _cds_wfcq_empty(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	/*
	 * Queue is empty if no node is pointed to by head->node.next nor
	 * tail->p. Even though the tail->p check is sufficient to find
	 * out if the queue is empty, we first check head->node.next as a
	 * common case to ensure that dequeuers do not frequently access
	 * enqueuer's tail->p cache line.
	 */
	return CMM_LOAD_SHARED(head->node.next) == NULL
		&& CMM_LOAD_SHARED(tail->p) == &head->node;
}
146 | ||
147 | static inline void _cds_wfcq_dequeue_lock(struct cds_wfcq_head *head, | |
148 | struct cds_wfcq_tail *tail) | |
149 | { | |
150 | int ret; | |
151 | ||
152 | ret = pthread_mutex_lock(&head->lock); | |
153 | assert(!ret); | |
154 | } | |
155 | ||
156 | static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head, | |
157 | struct cds_wfcq_tail *tail) | |
158 | { | |
159 | int ret; | |
160 | ||
161 | ret = pthread_mutex_unlock(&head->lock); | |
162 | assert(!ret); | |
163 | } | |
164 | ||
/*
 * ___cds_wfcq_append: append a [new_head, new_tail] chain of linked
 * nodes at the end of the queue. A single node is appended by passing
 * it as both new_head and new_tail.
 *
 * No mutual exclusion required; memory ordering relies on the implicit
 * barriers of uatomic_xchg().
 *
 * Returns false if the queue was empty prior to adding the nodes,
 * true otherwise.
 */
static inline bool ___cds_wfcq_append(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_head,
		struct cds_wfcq_node *new_tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *old_tail;

	/*
	 * Implicit memory barrier before uatomic_xchg() orders earlier
	 * stores to data structure containing node and setting
	 * node->next to NULL before publication.
	 */
	old_tail = uatomic_xchg(&tail->p, new_tail);

	/*
	 * Implicit memory barrier after uatomic_xchg() orders store to
	 * q->tail before store to old_tail->next.
	 *
	 * At this point, dequeuers see a NULL tail->p->next, which
	 * indicates that the queue is being appended to. The following
	 * store will append "node" to the queue from a dequeuer
	 * perspective.
	 */
	CMM_STORE_SHARED(old_tail->next, new_head);
	/*
	 * Return false if queue was empty prior to adding the node,
	 * else return true.
	 */
	return old_tail != &head->node;
}
196 | ||
197 | /* | |
198 | * cds_wfcq_enqueue: enqueue a node into a wait-free queue. | |
199 | * | |
200 | * Issues a full memory barrier before enqueue. No mutual exclusion is | |
201 | * required. | |
23773356 MD |
202 | * |
203 | * Returns false if the queue was empty prior to adding the node. | |
204 | * Returns true otherwise. | |
8ad4ce58 | 205 | */ |
f637f191 | 206 | static inline bool _cds_wfcq_enqueue(cds_wfcq_head_ptr_t head, |
8ad4ce58 MD |
207 | struct cds_wfcq_tail *tail, |
208 | struct cds_wfcq_node *new_tail) | |
209 | { | |
23773356 | 210 | return ___cds_wfcq_append(head, tail, new_tail, new_tail); |
8ad4ce58 MD |
211 | } |
212 | ||
f878b49e MD |
213 | /* |
214 | * ___cds_wfcq_busy_wait: adaptative busy-wait. | |
215 | * | |
216 | * Returns 1 if nonblocking and needs to block, 0 otherwise. | |
217 | */ | |
218 | static inline bool | |
219 | ___cds_wfcq_busy_wait(int *attempt, int blocking) | |
220 | { | |
221 | if (!blocking) | |
222 | return 1; | |
223 | if (++(*attempt) >= WFCQ_ADAPT_ATTEMPTS) { | |
224 | poll(NULL, 0, WFCQ_WAIT); /* Wait for 10ms */ | |
225 | *attempt = 0; | |
226 | } else { | |
227 | caa_cpu_relax(); | |
228 | } | |
229 | return 0; | |
230 | } | |
231 | ||
8ad4ce58 MD |
/*
 * Waiting for enqueuer to complete enqueue and return the next node.
 *
 * Returns CDS_WFCQ_WOULDBLOCK when @blocking is 0 and the next pointer
 * has not yet been published by the concurrent enqueuer.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
{
	struct cds_wfcq_node *next;
	int attempt = 0;

	/*
	 * Adaptative busy-looping waiting for enqueuer to complete enqueue.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_WOULDBLOCK;
	}

	return next;
}
251 | ||
/*
 * ___cds_wfcq_first: get first node of a queue, without dequeuing.
 *
 * Common implementation for the blocking/nonblocking variants. When
 * @blocking is 0, may return CDS_WFCQ_WOULDBLOCK (propagated from
 * ___cds_wfcq_node_sync_next) if an enqueue is in progress.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node;

	/* Load-only emptiness check first: avoids waiting on sync_next. */
	if (_cds_wfcq_empty(head, tail))
		return NULL;
	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	/* Load head->node.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}
267 | ||
268 | /* | |
47215721 | 269 | * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing. |
8ad4ce58 MD |
270 | * |
271 | * Content written into the node before enqueue is guaranteed to be | |
272 | * consistent, but no other memory ordering is ensured. | |
1fe734e1 MD |
273 | * Dequeue/splice/iteration mutual exclusion should be ensured by the |
274 | * caller. | |
f94061a3 MD |
275 | * |
276 | * Used by for-like iteration macros in urcu/wfcqueue.h: |
277 | * __cds_wfcq_for_each_blocking() | |
278 | * __cds_wfcq_for_each_blocking_safe() | |
131a29a6 MD |
279 | * |
280 | * Returns NULL if queue is empty, first node otherwise. | |
8ad4ce58 MD |
281 | */ |
282 | static inline struct cds_wfcq_node * | |
f637f191 | 283 | ___cds_wfcq_first_blocking(cds_wfcq_head_ptr_t head, |
47215721 MD |
284 | struct cds_wfcq_tail *tail) |
285 | { | |
286 | return ___cds_wfcq_first(head, tail, 1); | |
287 | } | |
288 | ||
289 | ||
290 | /* | |
291 | * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing. | |
292 | * | |
293 | * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if | |
294 | * it needs to block. | |
295 | */ | |
296 | static inline struct cds_wfcq_node * | |
f637f191 | 297 | ___cds_wfcq_first_nonblocking(cds_wfcq_head_ptr_t head, |
47215721 MD |
298 | struct cds_wfcq_tail *tail) |
299 | { | |
300 | return ___cds_wfcq_first(head, tail, 0); | |
301 | } | |
302 | ||
/*
 * ___cds_wfcq_next: get node following @node, without dequeuing.
 *
 * Common implementation for the blocking/nonblocking variants. Returns
 * NULL when @node is the last node (tail->p == node). When @blocking is
 * 0, may return CDS_WFCQ_WOULDBLOCK if an enqueue is in progress.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node,
		int blocking)
{
	struct cds_wfcq_node *next;

	/*
	 * Even though the following tail->p check is sufficient to find
	 * out if we reached the end of the queue, we first check
	 * node->next as a common case to ensure that iteration on nodes
	 * do not frequently access enqueuer's tail->p cache line.
	 */
	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/* Load node->next before tail->p */
		cmm_smp_rmb();
		if (CMM_LOAD_SHARED(tail->p) == node)
			return NULL;
		next = ___cds_wfcq_node_sync_next(node, blocking);
	}
	/* Load node->next before loading next's content */
	cmm_smp_read_barrier_depends();
	return next;
}
328 | ||
329 | /* | |
47215721 | 330 | * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing. |
8ad4ce58 | 331 | * |
8ad4ce58 MD |
332 | * Content written into the node before enqueue is guaranteed to be |
333 | * consistent, but no other memory ordering is ensured. | |
1fe734e1 MD |
334 | * Dequeue/splice/iteration mutual exclusion should be ensured by the |
335 | * caller. | |
47215721 MD |
336 | * |
337 | * Used by for-like iteration macros in urcu/wfcqueue.h: |
338 | * __cds_wfcq_for_each_blocking() | |
339 | * __cds_wfcq_for_each_blocking_safe() | |
131a29a6 MD |
340 | * |
341 | * Returns NULL if reached end of queue, non-NULL next queue node | |
342 | * otherwise. | |
8ad4ce58 MD |
343 | */ |
344 | static inline struct cds_wfcq_node * | |
f637f191 | 345 | ___cds_wfcq_next_blocking(cds_wfcq_head_ptr_t head, |
47215721 MD |
346 | struct cds_wfcq_tail *tail, |
347 | struct cds_wfcq_node *node) | |
348 | { | |
349 | return ___cds_wfcq_next(head, tail, node, 1); | |
350 | } | |
351 | ||
352 | /* | |
353 | * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing. |
354 | * | |
355 | * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if | |
356 | * it needs to block. | |
357 | */ | |
358 | static inline struct cds_wfcq_node * | |
f637f191 | 359 | ___cds_wfcq_next_nonblocking(cds_wfcq_head_ptr_t head, |
47215721 MD |
360 | struct cds_wfcq_tail *tail, |
361 | struct cds_wfcq_node *node) | |
362 | { | |
363 | return ___cds_wfcq_next(head, tail, node, 0); | |
364 | } | |
365 | ||
/*
 * ___cds_wfcq_dequeue_with_state: dequeue one node, optionally reporting state.
 *
 * @state: if non-NULL, set to CDS_WFCQ_STATE_LAST when the dequeued
 *	node was the last one in the queue, 0 otherwise.
 * @blocking: when 0, return CDS_WFCQ_WOULDBLOCK rather than busy-wait
 *	for a concurrent enqueuer to publish a next pointer.
 * @safe: when 0, update tail->p with plain load/store instead of
 *	uatomic_cmpxchg(). NOTE(review): this presumably requires the
 *	caller to exclude concurrent enqueuers — confirm at call sites.
 *
 * Returns NULL if the queue is empty.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int *state,
		int blocking,
		int safe)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node, *next;

	if (state)
		*state = 0;

	if (_cds_wfcq_empty(head, tail)) {
		return NULL;
	}

	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	if (!blocking && node == CDS_WFCQ_WOULDBLOCK) {
		return CDS_WFCQ_WOULDBLOCK;
	}

	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		struct cds_wfcq_node *ret_node;

		/*
		 * @node is probably the only node in the queue.
		 * Try to move the tail to &q->head.
		 * q->head.next is set to NULL here, and stays
		 * NULL if the cmpxchg succeeds. Should the
		 * cmpxchg fail due to a concurrent enqueue, the
		 * q->head.next will be set to the next node.
		 * The implicit memory barrier before
		 * uatomic_cmpxchg() orders load node->next
		 * before loading q->tail.
		 * The implicit memory barrier before uatomic_cmpxchg
		 * orders load q->head.next before loading node's
		 * content.
		 */
		_cds_wfcq_node_init(&head->node);
		if (safe) {
			ret_node = uatomic_cmpxchg(&tail->p, node,
					&head->node);
		} else {
			ret_node = tail->p;
			if (ret_node == node)
				tail->p = &head->node;
		}
		if (ret_node == node) {
			/* Tail moved back to head: @node was the last node. */
			if (state)
				*state |= CDS_WFCQ_STATE_LAST;
			return node;
		}
		next = ___cds_wfcq_node_sync_next(node, blocking);
		/*
		 * In nonblocking mode, if we would need to block to
		 * get node's next, set the head next node pointer
		 * (currently NULL) back to its original value.
		 */
		if (!blocking && next == CDS_WFCQ_WOULDBLOCK) {
			head->node.next = node;
			return CDS_WFCQ_WOULDBLOCK;
		}
	}

	/*
	 * Move queue head forward.
	 */
	head->node.next = next;

	/* Load q->head.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}
440 | ||
441 | /* | |
eec791af | 442 | * __cds_wfcq_dequeue_with_state_blocking: dequeue node from queue, with state. |
8ad4ce58 | 443 | * |
47215721 MD |
444 | * Content written into the node before enqueue is guaranteed to be |
445 | * consistent, but no other memory ordering is ensured. | |
446 | * It is valid to reuse and free a dequeued node immediately. | |
447 | * Dequeue/splice/iteration mutual exclusion should be ensured by the | |
448 | * caller. | |
8ad4ce58 | 449 | */ |
47215721 | 450 | static inline struct cds_wfcq_node * |
f637f191 | 451 | ___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_ptr_t head, |
eec791af MD |
452 | struct cds_wfcq_tail *tail, int *state) |
453 | { | |
13652c4b | 454 | return ___cds_wfcq_dequeue_with_state(head, tail, state, 1, 1); |
eec791af MD |
455 | } |
456 | ||
457 | /* | |
458 | * ___cds_wfcq_dequeue_blocking: dequeue node from queue. | |
459 | * | |
460 | * Same as __cds_wfcq_dequeue_with_state_blocking, but without saving | |
461 | * state. | |
462 | */ | |
463 | static inline struct cds_wfcq_node * | |
f637f191 | 464 | ___cds_wfcq_dequeue_blocking(cds_wfcq_head_ptr_t head, |
47215721 MD |
465 | struct cds_wfcq_tail *tail) |
466 | { | |
eec791af | 467 | return ___cds_wfcq_dequeue_with_state_blocking(head, tail, NULL); |
47215721 MD |
468 | } |
469 | ||
470 | /* | |
eec791af | 471 | * __cds_wfcq_dequeue_with_state_nonblocking: dequeue node, with state. |
47215721 MD |
472 | * |
473 | * Same as __cds_wfcq_dequeue_blocking, but returns CDS_WFCQ_WOULDBLOCK | |
474 | * if it needs to block. | |
475 | */ | |
476 | static inline struct cds_wfcq_node * | |
f637f191 | 477 | ___cds_wfcq_dequeue_with_state_nonblocking(cds_wfcq_head_ptr_t head, |
eec791af MD |
478 | struct cds_wfcq_tail *tail, int *state) |
479 | { | |
13652c4b | 480 | return ___cds_wfcq_dequeue_with_state(head, tail, state, 0, 1); |
eec791af MD |
481 | } |
482 | ||
483 | /* | |
484 | * ___cds_wfcq_dequeue_nonblocking: dequeue node from queue. | |
485 | * | |
486 | * Same as __cds_wfcq_dequeue_with_state_nonblocking, but without saving | |
487 | * state. | |
488 | */ | |
489 | static inline struct cds_wfcq_node * | |
f637f191 | 490 | ___cds_wfcq_dequeue_nonblocking(cds_wfcq_head_ptr_t head, |
47215721 MD |
491 | struct cds_wfcq_tail *tail) |
492 | { | |
eec791af | 493 | return ___cds_wfcq_dequeue_with_state_nonblocking(head, tail, NULL); |
47215721 MD |
494 | } |
495 | ||
f878b49e MD |
/*
 * __cds_wfcq_splice: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronisation table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice(
		cds_wfcq_head_ptr_t u_dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t u_src_q_head,
		struct cds_wfcq_tail *src_q_tail,
		int blocking)
{
	struct __cds_wfcq_head *dest_q_head = u_dest_q_head._h;
	struct __cds_wfcq_head *src_q_head = u_src_q_head._h;
	struct cds_wfcq_node *head, *tail;
	int attempt = 0;

	/*
	 * Initial emptiness check to speed up cases where queue is
	 * empty: only require loads to check if queue is empty.
	 */
	if (_cds_wfcq_empty(src_q_head, src_q_tail))
		return CDS_WFCQ_RET_SRC_EMPTY;

	for (;;) {
		/*
		 * Open-coded _cds_wfcq_empty() by testing result of
		 * uatomic_xchg, as well as tail pointer vs head node
		 * address.
		 */
		head = uatomic_xchg(&src_q_head->node.next, NULL);
		if (head)
			break;	/* non-empty */
		if (CMM_LOAD_SHARED(src_q_tail->p) == &src_q_head->node)
			return CDS_WFCQ_RET_SRC_EMPTY;
		/* Head is NULL but tail moved: an enqueue is in progress. */
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_RET_WOULDBLOCK;
	}

	/*
	 * Memory barrier implied before uatomic_xchg() orders store to
	 * src_q->head before store to src_q->tail. This is required by
	 * concurrent enqueue on src_q, which exchanges the tail before
	 * updating the previous tail's next pointer.
	 */
	tail = uatomic_xchg(&src_q_tail->p, &src_q_head->node);

	/*
	 * Append the spliced content of src_q into dest_q. Does not
	 * require mutual exclusion on dest_q (wait-free).
	 */
	if (___cds_wfcq_append(dest_q_head, dest_q_tail, head, tail))
		return CDS_WFCQ_RET_DEST_NON_EMPTY;
	else
		return CDS_WFCQ_RET_DEST_EMPTY;
}
558 | ||
47215721 MD |
559 | /* |
560 | * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q. | |
561 | * | |
562 | * Dequeue all nodes from src_q. | |
563 | * dest_q must be already initialized. | |
f878b49e MD |
564 | * Mutual exclusion for src_q should be ensured by the caller as |
565 | * specified in the "Synchronisation table". | |
23773356 MD |
566 | * Returns enum cds_wfcq_ret which indicates the state of the src or |
567 | * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK. | |
47215721 | 568 | */ |
23773356 | 569 | static inline enum cds_wfcq_ret |
47215721 | 570 | ___cds_wfcq_splice_blocking( |
f637f191 | 571 | cds_wfcq_head_ptr_t dest_q_head, |
47215721 | 572 | struct cds_wfcq_tail *dest_q_tail, |
f637f191 | 573 | cds_wfcq_head_ptr_t src_q_head, |
47215721 MD |
574 | struct cds_wfcq_tail *src_q_tail) |
575 | { | |
23773356 | 576 | return ___cds_wfcq_splice(dest_q_head, dest_q_tail, |
47215721 MD |
577 | src_q_head, src_q_tail, 1); |
578 | } | |
579 | ||
580 | /* | |
581 | * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q. | |
582 | * | |
23773356 MD |
583 | * Same as __cds_wfcq_splice_blocking, but returns |
584 | * CDS_WFCQ_RET_WOULDBLOCK if it needs to block. | |
47215721 | 585 | */ |
23773356 | 586 | static inline enum cds_wfcq_ret |
47215721 | 587 | ___cds_wfcq_splice_nonblocking( |
f637f191 | 588 | cds_wfcq_head_ptr_t dest_q_head, |
47215721 | 589 | struct cds_wfcq_tail *dest_q_tail, |
f637f191 | 590 | cds_wfcq_head_ptr_t src_q_head, |
47215721 MD |
591 | struct cds_wfcq_tail *src_q_tail) |
592 | { | |
593 | return ___cds_wfcq_splice(dest_q_head, dest_q_tail, | |
594 | src_q_head, src_q_tail, 0); | |
8ad4ce58 MD |
595 | } |
596 | ||
597 | /* | |
eec791af | 598 | * cds_wfcq_dequeue_with_state_blocking: dequeue a node from a wait-free queue. |
8ad4ce58 MD |
599 | * |
600 | * Content written into the node before enqueue is guaranteed to be | |
601 | * consistent, but no other memory ordering is ensured. | |
ffa11a18 | 602 | * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is |
8ad4ce58 MD |
603 | * ensured. |
604 | * It is valid to reuse and free a dequeued node immediately. | |
605 | */ | |
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int *state)
{
	struct cds_wfcq_node *node;

	/* The dequeue lock serializes against other dequeuers/splicers. */
	_cds_wfcq_dequeue_lock(head, tail);
	node = ___cds_wfcq_dequeue_with_state_blocking(head, tail, state);
	_cds_wfcq_dequeue_unlock(head, tail);
	return node;
}
617 | ||
eec791af MD |
618 | /* |
619 | * cds_wfcq_dequeue_blocking: dequeue node from queue. | |
620 | * | |
621 | * Same as cds_wfcq_dequeue_blocking, but without saving state. | |
622 | */ | |
623 | static inline struct cds_wfcq_node * | |
624 | _cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head, | |
625 | struct cds_wfcq_tail *tail) | |
626 | { | |
627 | return _cds_wfcq_dequeue_with_state_blocking(head, tail, NULL); | |
628 | } | |
629 | ||
8ad4ce58 MD |
630 | /* |
631 | * cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q. | |
632 | * | |
633 | * Dequeue all nodes from src_q. | |
634 | * dest_q must be already initialized. | |
635 | * Content written into the node before enqueue is guaranteed to be | |
636 | * consistent, but no other memory ordering is ensured. | |
ffa11a18 | 637 | * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is |
8ad4ce58 | 638 | * ensured. |
23773356 MD |
639 | * Returns enum cds_wfcq_ret which indicates the state of the src or |
640 | * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK. | |
8ad4ce58 | 641 | */ |
23773356 | 642 | static inline enum cds_wfcq_ret |
8ad4ce58 MD |
643 | _cds_wfcq_splice_blocking( |
644 | struct cds_wfcq_head *dest_q_head, | |
645 | struct cds_wfcq_tail *dest_q_tail, | |
646 | struct cds_wfcq_head *src_q_head, | |
647 | struct cds_wfcq_tail *src_q_tail) | |
648 | { | |
23773356 MD |
649 | enum cds_wfcq_ret ret; |
650 | ||
8ad4ce58 | 651 | _cds_wfcq_dequeue_lock(src_q_head, src_q_tail); |
23773356 | 652 | ret = ___cds_wfcq_splice_blocking(dest_q_head, dest_q_tail, |
8ad4ce58 MD |
653 | src_q_head, src_q_tail); |
654 | _cds_wfcq_dequeue_unlock(src_q_head, src_q_tail); | |
23773356 | 655 | return ret; |
8ad4ce58 MD |
656 | } |
657 | ||
658 | #ifdef __cplusplus | |
659 | } | |
660 | #endif | |
661 | ||
662 | #endif /* _URCU_WFCQUEUE_STATIC_H */ |