// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2011-2012 Lai Jiangshan <laijs@cn.fujitsu.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_WFCQUEUE_STATIC_H
#define _URCU_WFCQUEUE_STATIC_H

/*
 * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for
 * linking dynamically with the userspace rcu library.
 */

#include <pthread.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/assert.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Concurrent queue with wait-free enqueue/blocking dequeue.
 *
 * This queue has been designed and implemented collaboratively by
 * Mathieu Desnoyers and Lai Jiangshan. Inspired by the
 * half-wait-free/half-blocking queue implementation done by Paul E.
 * McKenney.
 *
 * Mutual exclusion of cds_wfcq_* / __cds_wfcq_* API
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 * Legend:
 * [1] cds_wfcq_enqueue
 * [2] __cds_wfcq_splice (destination queue)
 * [3] __cds_wfcq_dequeue
 * [4] __cds_wfcq_splice (source queue)
 * [5] __cds_wfcq_first
 * [6] __cds_wfcq_next
 *
 *        [1] [2] [3] [4] [5] [6]
 * [1]     -   -   -   -   -   -
 * [2]     -   -   -   -   -   -
 * [3]     -   -   X   X   X   X
 * [4]     -   -   X   -   X   X
 * [5]     -   -   X   X   -   -
 * [6]     -   -   X   X   -   -
 *
 * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock().
 *
 * For convenience, cds_wfcq_dequeue_blocking() and
 * cds_wfcq_splice_blocking() hold the dequeue lock.
 *
 * Besides locking, mutual exclusion of dequeue, splice and iteration
 * can be ensured by performing all of those operations from a single
 * thread, without requiring any lock.
 */
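
/*
 * Example usage (illustrative sketch only, not part of this header):
 * a multi-producer/single-consumer setup built on the convenience API
 * declared in urcu/wfcqueue.h. The struct and function names below
 * (struct my_msg, my_msg_send, my_msg_recv, ...) are hypothetical.
 *
 *	struct my_msg {
 *		int payload;
 *		struct cds_wfcq_node node;	// embedded queue linkage
 *	};
 *
 *	static struct cds_wfcq_head my_q_head;
 *	static struct cds_wfcq_tail my_q_tail;
 *
 *	// Called once before any producer or consumer runs.
 *	static void my_q_setup(void)
 *	{
 *		cds_wfcq_init(&my_q_head, &my_q_tail);
 *	}
 *
 *	// Wait-free: may be called concurrently from any thread.
 *	static void my_msg_send(struct my_msg *msg)
 *	{
 *		cds_wfcq_node_init(&msg->node);
 *		cds_wfcq_enqueue(&my_q_head, &my_q_tail, &msg->node);
 *	}
 *
 *	// Blocking dequeue; takes the dequeue lock internally, so
 *	// multiple consumers are also safe.
 *	static struct my_msg *my_msg_recv(void)
 *	{
 *		struct cds_wfcq_node *qnode;
 *
 *		qnode = cds_wfcq_dequeue_blocking(&my_q_head, &my_q_tail);
 *		if (!qnode)
 *			return NULL;	// queue empty
 *		return caa_container_of(qnode, struct my_msg, node);
 *	}
 */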

#define WFCQ_ADAPT_ATTEMPTS	10	/* Retry if being set */
#define WFCQ_WAIT		10	/* Wait 10 ms if being set */

/*
 * cds_wfcq_node_init: initialize wait-free queue node.
 */
static inline void _cds_wfcq_node_init(struct cds_wfcq_node *node)
{
	node->next = NULL;
}

static inline void _cds_wfcq_node_init_atomic(struct cds_wfcq_node *node)
{
	uatomic_store(&node->next, NULL, CMM_RELAXED);
}

/*
 * cds_wfcq_init: initialize wait-free queue (with lock). Pair with
 * cds_wfcq_destroy().
 */
static inline void _cds_wfcq_init(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
	ret = pthread_mutex_init(&head->lock, NULL);
	urcu_posix_assert(!ret);
}

/*
 * cds_wfcq_destroy: destroy wait-free queue (with lock). Pair with
 * cds_wfcq_init().
 */
static inline void _cds_wfcq_destroy(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret = pthread_mutex_destroy(&head->lock);
	urcu_posix_assert(!ret);
}

/*
 * __cds_wfcq_init: initialize wait-free queue (without lock). Don't
 * pair with any destroy function.
 */
static inline void ___cds_wfcq_init(struct __cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
}

/*
 * cds_wfcq_empty: return whether wait-free queue is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 *
 * We perform the test on head->node.next to check if the queue is
 * possibly empty, but we confirm this by checking if the tail pointer
 * points to the head node, because the tail pointer is the linearisation
 * point of the enqueuers. Just checking the head next pointer could
 * make a queue appear empty if an enqueuer is preempted for a long time
 * between xchg() and setting the previous node's next pointer.
 */
static inline bool _cds_wfcq_empty(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail)
{
	struct __cds_wfcq_head *head = u_head._h;

	/*
	 * Queue is empty if no node is pointed to by head->node.next nor
	 * tail->p. Even though the tail->p check is sufficient to find
	 * out if the queue is empty, we first check head->node.next as a
	 * common case to ensure that dequeuers do not frequently access
	 * the enqueuer's tail->p cache line.
	 */
	return uatomic_load(&head->node.next, CMM_CONSUME) == NULL
		&& uatomic_load(&tail->p, CMM_CONSUME) == &head->node;
}

static inline void _cds_wfcq_dequeue_lock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_lock(&head->lock);
	urcu_posix_assert(!ret);
}

static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_unlock(&head->lock);
	urcu_posix_assert(!ret);
}

static inline bool ___cds_wfcq_append(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_head,
		struct cds_wfcq_node *new_tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *old_tail;

	/*
	 * Implicit memory barrier before uatomic_xchg() orders earlier
	 * stores to data structure containing node and setting
	 * node->next to NULL before publication.
	 */
	old_tail = uatomic_xchg_mo(&tail->p, new_tail, CMM_SEQ_CST);

	/*
	 * Implicit memory barrier after uatomic_xchg() orders store to
	 * q->tail before store to old_tail->next.
	 *
	 * At this point, dequeuers see a NULL tail->p->next, which
	 * indicates that the queue is being appended to. The following
	 * store will append "node" to the queue from a dequeuer
	 * perspective.
	 */
	uatomic_store(&old_tail->next, new_head, CMM_RELEASE);

	/*
	 * Return false if queue was empty prior to adding the node,
	 * else return true.
	 */
	return old_tail != &head->node;
}
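
/*
 * Worked interleaving sketch (informal, added for illustration): with a
 * queue whose last node is A (tail->p == &A), an enqueuer appending
 * node B proceeds in two steps:
 *
 *	old_tail = xchg(&tail->p, &B)	-> tail->p == &B, A.next == NULL
 *	store &B into old_tail->next	-> A.next == &B
 *
 * Between the two steps, a dequeuer walking from the head observes
 * A.next == NULL while tail->p != &A, so it knows an enqueue is in
 * flight and busy-waits in ___cds_wfcq_node_sync_next() below rather
 * than treating A as the last node.
 */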

/*
 * cds_wfcq_enqueue: enqueue a node into a wait-free queue.
 *
 * Operations prior to enqueue are consistent with respect to dequeuing or
 * splicing and iterating.
 *
 * Returns false if the queue was empty prior to adding the node.
 * Returns true otherwise.
 */
static inline bool _cds_wfcq_enqueue(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_tail)
{
	cmm_emit_legacy_smp_mb();

	return ___cds_wfcq_append(head, tail, new_tail, new_tail);
}
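
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the boolean returned by enqueue tells whether the queue was
 * previously empty, which is a natural hook for waking a sleeping
 * consumer only on the empty -> non-empty transition. my_wake_consumer()
 * is assumed to be provided by the caller.
 *
 *	static void my_enqueue(struct cds_wfcq_head *head,
 *			struct cds_wfcq_tail *tail,
 *			struct cds_wfcq_node *node)
 *	{
 *		cds_wfcq_node_init(node);
 *		if (!cds_wfcq_enqueue(head, tail, node))
 *			my_wake_consumer();	// queue was empty before us
 *	}
 */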

/*
 * CDS_WFCQ_WAIT_SLEEP:
 *
 * By default, this sleeps for the given @msec milliseconds.
 * This is a macro which LGPL users may #define themselves before
 * including wfcqueue.h to override the default behavior (e.g.
 * to log a warning or perform other background work).
 */
#ifndef CDS_WFCQ_WAIT_SLEEP
#define CDS_WFCQ_WAIT_SLEEP(msec) ___cds_wfcq_wait_sleep(msec)
#endif

static inline void ___cds_wfcq_wait_sleep(int msec)
{
	(void) poll(NULL, 0, msec);
}

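/*
 * Example override (sketch): an LGPL user can replace the default
 * sleep, e.g. to count wait events, by defining the macro before
 * including urcu/wfcqueue.h. my_wfcq_wait_count and my_wait_ms() are
 * hypothetical names.
 *
 *	#define CDS_WFCQ_WAIT_SLEEP(msec)		\
 *		do {					\
 *			my_wfcq_wait_count++;		\
 *			my_wait_ms(msec);		\
 *		} while (0)
 *	#include <urcu/wfcqueue.h>
 */
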
/*
 * ___cds_wfcq_busy_wait: adaptive busy-wait.
 *
 * Returns 1 if nonblocking and needs to block, 0 otherwise.
 */
static inline bool
___cds_wfcq_busy_wait(int *attempt, int blocking)
{
	if (!blocking)
		return 1;
	if (++(*attempt) >= WFCQ_ADAPT_ATTEMPTS) {
		CDS_WFCQ_WAIT_SLEEP(WFCQ_WAIT);	/* Wait for 10ms */
		*attempt = 0;
	} else {
		caa_cpu_relax();
	}
	return 0;
}

/*
 * Waiting for enqueuer to complete enqueue and return the next node.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
{
	struct cds_wfcq_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping waiting for enqueuer to complete enqueue.
	 *
	 * Load node.next before loading node's content.
	 */
	while ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_WOULDBLOCK;
	}

	return next;
}

static inline struct cds_wfcq_node *
___cds_wfcq_first(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail))
		return NULL;
	node = ___cds_wfcq_node_sync_next(&head->node, blocking);

	return node;
}

/*
 * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if queue is empty, first node otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 1);
}

/*
 * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 0);
}

static inline struct cds_wfcq_node *
___cds_wfcq_next(cds_wfcq_head_ptr_t head __attribute__((unused)),
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node,
		int blocking)
{
	struct cds_wfcq_node *next;

	/*
	 * Even though the following tail->p check is sufficient to find
	 * out if we reached the end of the queue, we first check
	 * node->next as a common case to ensure that iteration on nodes
	 * does not frequently access the enqueuer's tail->p cache line.
	 *
	 * Load node->next before loading next's content.
	 */
	if ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
		if (uatomic_load(&tail->p, CMM_RELAXED) == node)
			return NULL;
		next = ___cds_wfcq_node_sync_next(node, blocking);
	}

	return next;
}

/*
 * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if reached end of queue, non-NULL next queue node
 * otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 1);
}

/*
 * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 0);
}

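/*
 * Iteration sketch (illustrative): first/next are typically consumed
 * through the __cds_wfcq_for_each_blocking() macro from
 * urcu/wfcqueue.h. Assuming the hypothetical struct my_msg and queue
 * from the example near the top of this file, and with
 * dequeue/splice/iteration mutual exclusion held by the caller:
 *
 *	struct cds_wfcq_node *qnode;
 *
 *	__cds_wfcq_for_each_blocking(&my_q_head, &my_q_tail, qnode) {
 *		struct my_msg *msg =
 *			caa_container_of(qnode, struct my_msg, node);
 *		my_process(msg);	// hypothetical; must not free msg
 *	}
 */
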
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int *state,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node, *next;

	if (state)
		*state = 0;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail)) {
		return NULL;
	}

	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	if (!blocking && node == CDS_WFCQ_WOULDBLOCK) {
		return CDS_WFCQ_WOULDBLOCK;
	}

	if ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
		/*
		 * @node is probably the only node in the queue.
		 * Try to move the tail to &q->head.
		 * q->head.next is set to NULL here, and stays
		 * NULL if the cmpxchg succeeds. Should the
		 * cmpxchg fail due to a concurrent enqueue, the
		 * q->head.next will be set to the next node.
		 */
		_cds_wfcq_node_init_atomic(&head->node);
		if (uatomic_cmpxchg_mo(&tail->p, node, &head->node,
				CMM_SEQ_CST, CMM_SEQ_CST) == node) {
			if (state)
				*state |= CDS_WFCQ_STATE_LAST;
			cmm_emit_legacy_smp_mb();
			return node;
		}
		next = ___cds_wfcq_node_sync_next(node, blocking);
		/*
		 * In nonblocking mode, if we would need to block to
		 * get node's next, set the head next node pointer
		 * (currently NULL) back to its original value.
		 */
		if (!blocking && next == CDS_WFCQ_WOULDBLOCK) {
			uatomic_store(&head->node.next, node, CMM_RELAXED);
			return CDS_WFCQ_WOULDBLOCK;
		}
	}

	/*
	 * Move queue head forward.
	 */
	uatomic_store(&head->node.next, next, CMM_RELAXED);
	cmm_emit_legacy_smp_mb();

	return node;
}

/*
 * __cds_wfcq_dequeue_with_state_blocking: dequeue node from queue, with state.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * It is valid to reuse and free a dequeued node immediately.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 1);
}

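/*
 * Sketch of state usage (illustrative): CDS_WFCQ_STATE_LAST tells the
 * caller it just removed the last node, e.g. to stop a worker once the
 * queue drains. The queue names are the hypothetical ones from the
 * example above; mutual exclusion is assumed held as per the table.
 *
 *	struct cds_wfcq_node *qnode;
 *	int state;
 *
 *	qnode = __cds_wfcq_dequeue_with_state_blocking(&my_q_head,
 *			&my_q_tail, &state);
 *	if (qnode && (state & CDS_WFCQ_STATE_LAST))
 *		my_last_node_hook();	// hypothetical callback
 */
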
/*
 * ___cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * __cds_wfcq_dequeue_with_state_nonblocking: dequeue node, with state.
 *
 * Same as __cds_wfcq_dequeue_blocking, but returns CDS_WFCQ_WOULDBLOCK
 * if it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 0);
}

/*
 * ___cds_wfcq_dequeue_nonblocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_nonblocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_nonblocking(head, tail, NULL);
}

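/*
 * Nonblocking dequeue sketch (illustrative): CDS_WFCQ_WOULDBLOCK is a
 * distinct sentinel, so both it and NULL must be checked before using
 * the returned pointer. Useful when the caller cannot afford to wait
 * for a preempted enqueuer. Names reuse the hypothetical example above;
 * dequeue mutual exclusion is assumed held.
 *
 *	static void my_poll_queue(void)
 *	{
 *		struct cds_wfcq_node *qnode;
 *
 *		qnode = __cds_wfcq_dequeue_nonblocking(&my_q_head, &my_q_tail);
 *		if (qnode == CDS_WFCQ_WOULDBLOCK)
 *			return;		// enqueuer in flight; retry later
 *		if (!qnode)
 *			return;		// queue empty
 *		my_process(caa_container_of(qnode, struct my_msg, node));
 *	}
 */
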
/*
 * __cds_wfcq_splice: enqueue all src_q nodes at the end of dest_q.
 *
 * Operations after splice are consistent with respect to enqueue.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice(
		cds_wfcq_head_ptr_t u_dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t u_src_q_head,
		struct cds_wfcq_tail *src_q_tail,
		int blocking)
{
	struct __cds_wfcq_head *dest_q_head = u_dest_q_head._h;
	struct __cds_wfcq_head *src_q_head = u_src_q_head._h;
	struct cds_wfcq_node *head, *tail;
	int attempt = 0;

	/*
	 * Initial emptiness check to speed up cases where queue is
	 * empty: only require loads to check if queue is empty.
	 */
	if (_cds_wfcq_empty(__cds_wfcq_head_cast(src_q_head), src_q_tail))
		return CDS_WFCQ_RET_SRC_EMPTY;

	for (;;) {
		/*
		 * Open-coded _cds_wfcq_empty() by testing result of
		 * uatomic_xchg, as well as tail pointer vs head node
		 * address.
		 */
		head = uatomic_xchg_mo(&src_q_head->node.next, NULL, CMM_SEQ_CST);
		if (head)
			break;	/* non-empty */
		if (uatomic_load(&src_q_tail->p, CMM_CONSUME) == &src_q_head->node)
			return CDS_WFCQ_RET_SRC_EMPTY;
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_RET_WOULDBLOCK;
	}

	/*
	 * Memory barrier implied before uatomic_xchg() orders store to
	 * src_q->head before store to src_q->tail. This is required by
	 * concurrent enqueue on src_q, which exchanges the tail before
	 * updating the previous tail's next pointer.
	 */
	cmm_emit_legacy_smp_mb();
	tail = uatomic_xchg_mo(&src_q_tail->p, &src_q_head->node, CMM_SEQ_CST);

	/*
	 * Append the spliced content of src_q into dest_q. Does not
	 * require mutual exclusion on dest_q (wait-free).
	 */
	if (___cds_wfcq_append(__cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			head, tail))
		return CDS_WFCQ_RET_DEST_NON_EMPTY;
	else
		return CDS_WFCQ_RET_DEST_EMPTY;
}

/*
 * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_blocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
		src_q_head, src_q_tail, 1);
}

/*
 * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Same as __cds_wfcq_splice_blocking, but returns
 * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_nonblocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
		src_q_head, src_q_tail, 0);
}

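/*
 * Nonblocking splice sketch (illustrative): a caller that cannot block
 * can bound its effort and fall back, e.g. deferring the splice to a
 * blocking context when CDS_WFCQ_RET_WOULDBLOCK is returned. Queue
 * names and my_defer_splice() are hypothetical.
 *
 *	switch (__cds_wfcq_splice_nonblocking(&my_dest_head, &my_dest_tail,
 *			&my_src_head, &my_src_tail)) {
 *	case CDS_WFCQ_RET_WOULDBLOCK:
 *		my_defer_splice();	// hypothetical fallback
 *		break;
 *	default:
 *		break;			// spliced (or source empty)
 *	}
 */
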
/*
 * cds_wfcq_dequeue_with_state_blocking: dequeue a node from a wait-free queue.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is
 * ensured.
 * It is valid to reuse and free a dequeued node immediately.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int *state)
{
	struct cds_wfcq_node *retval;

	_cds_wfcq_dequeue_lock(head, tail);
	retval = ___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_cast(head),
			tail, state);
	_cds_wfcq_dequeue_unlock(head, tail);
	return retval;
}

/*
 * cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return _cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is
 * ensured.
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
_cds_wfcq_splice_blocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	enum cds_wfcq_ret ret;

	_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
	ret = ___cds_wfcq_splice_blocking(cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			cds_wfcq_head_cast(src_q_head), src_q_tail);
	_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
	return ret;
}

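/*
 * Batch-drain sketch (illustrative): a common pattern is to splice the
 * shared queue into a thread-local queue under the dequeue lock, then
 * process the nodes locally without further synchronization.
 * shared_head/shared_tail, struct my_msg and my_process() are
 * hypothetical.
 *
 *	struct cds_wfcq_head local_head;
 *	struct cds_wfcq_tail local_tail;
 *	struct cds_wfcq_node *qnode, *tmp;
 *
 *	cds_wfcq_init(&local_head, &local_tail);
 *	if (cds_wfcq_splice_blocking(&local_head, &local_tail,
 *			&shared_head, &shared_tail) != CDS_WFCQ_RET_SRC_EMPTY) {
 *		__cds_wfcq_for_each_blocking_safe(&local_head, &local_tail,
 *				qnode, tmp)
 *			my_process(caa_container_of(qnode,
 *					struct my_msg, node));
 *	}
 *	cds_wfcq_destroy(&local_head, &local_tail);
 */
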
#ifdef __cplusplus
}
#endif

#endif /* _URCU_WFCQUEUE_STATIC_H */