// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2011-2012 Lai Jiangshan <laijs@cn.fujitsu.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_WFCQUEUE_STATIC_H
#define _URCU_WFCQUEUE_STATIC_H

/*
 * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for
 * linking dynamically with the userspace rcu library.
 */

#include <pthread.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/assert.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Concurrent queue with wait-free enqueue/blocking dequeue.
 *
 * This queue has been designed and implemented collaboratively by
 * Mathieu Desnoyers and Lai Jiangshan. Inspired by the
 * half-wait-free/half-blocking queue implementation done by Paul E.
 * McKenney.
 *
 * Mutual exclusion of the cds_wfcq_* / __cds_wfcq_* API:
 *
 * Synchronization table:
 *
 * External synchronization, as described in the API below, is
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 * Legend:
 * [1] cds_wfcq_enqueue
 * [2] __cds_wfcq_splice (destination queue)
 * [3] __cds_wfcq_dequeue
 * [4] __cds_wfcq_splice (source queue)
 * [5] __cds_wfcq_first
 * [6] __cds_wfcq_next
 *
 *     [1] [2] [3] [4] [5] [6]
 * [1]  -   -   -   -   -   -
 * [2]  -   -   -   -   -   -
 * [3]  -   -   X   X   X   X
 * [4]  -   -   X   -   X   X
 * [5]  -   -   X   X   -   -
 * [6]  -   -   X   X   -   -
 *
 * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock().
 *
 * For convenience, cds_wfcq_dequeue_blocking() and
 * cds_wfcq_splice_blocking() hold the dequeue lock.
 *
 * Besides locking, mutual exclusion of dequeue, splice and iteration
 * can be ensured by performing all of those operations from a single
 * thread, without requiring any lock.
 */
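
/*
 * Example (illustrative sketch, not part of this file): what the
 * synchronization table means in practice, using the public wrappers
 * from urcu/wfcqueue.h that map onto the inlines below. Enqueuers need
 * no synchronization against anything (rows/columns [1] and [2] are
 * all "-"), while concurrent dequeuers (pair [3]/[3], marked "X")
 * can serialize through the dequeue lock. The names work_item,
 * submit and take are hypothetical.
 */
#if 0
#include <urcu/wfcqueue.h>

struct work_item {
	int payload;			/* hypothetical user data */
	struct cds_wfcq_node node;	/* embedded queue linkage */
};

static struct cds_wfcq_head worker_head;
static struct cds_wfcq_tail worker_tail;

/* Any number of threads may call this concurrently ([1]/[1] is "-"). */
static void submit(struct work_item *item)
{
	cds_wfcq_node_init(&item->node);
	(void) cds_wfcq_enqueue(&worker_head, &worker_tail, &item->node);
}

/* Dequeuers ([3]/[3] is "X") serialize through the dequeue lock. */
static struct work_item *take(void)
{
	struct cds_wfcq_node *qnode;

	cds_wfcq_dequeue_lock(&worker_head, &worker_tail);
	qnode = __cds_wfcq_dequeue_blocking(cds_wfcq_head_cast(&worker_head),
			&worker_tail);
	cds_wfcq_dequeue_unlock(&worker_head, &worker_tail);
	return qnode ? caa_container_of(qnode, struct work_item, node) : NULL;
}
#endif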

#define WFCQ_ADAPT_ATTEMPTS	10	/* Busy-wait retries while next is being set */
#define WFCQ_WAIT		10	/* Sleep 10 ms between busy-wait rounds */

/*
 * cds_wfcq_node_init: initialize wait-free queue node.
 */
static inline void _cds_wfcq_node_init(struct cds_wfcq_node *node)
{
	node->next = NULL;
}

static inline void _cds_wfcq_node_init_atomic(struct cds_wfcq_node *node)
{
	uatomic_store(&node->next, NULL, CMM_RELAXED);
}

/*
 * cds_wfcq_init: initialize wait-free queue (with lock). Pair with
 * cds_wfcq_destroy().
 */
static inline void _cds_wfcq_init(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
	ret = pthread_mutex_init(&head->lock, NULL);
	urcu_posix_assert(!ret);
}

/*
 * cds_wfcq_destroy: destroy wait-free queue (with lock). Pair with
 * cds_wfcq_init().
 */
static inline void _cds_wfcq_destroy(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret = pthread_mutex_destroy(&head->lock);
	urcu_posix_assert(!ret);
}
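
/*
 * Example (illustrative sketch): typical lifecycle of a queue with the
 * built-in lock, through the public urcu/wfcqueue.h wrappers that map
 * onto the _cds_wfcq_* inlines above. Names are hypothetical.
 */
#if 0
#include <urcu/wfcqueue.h>

static struct cds_wfcq_head myqueue_head;
static struct cds_wfcq_tail myqueue_tail;

static void queue_lifecycle(void)
{
	/* Initialize head, tail and the internal pthread mutex. */
	cds_wfcq_init(&myqueue_head, &myqueue_tail);

	/* ... enqueue/dequeue traffic ... */

	/* Destroy the internal mutex; pairs with cds_wfcq_init(). */
	cds_wfcq_destroy(&myqueue_head, &myqueue_tail);
}
#endif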

/*
 * __cds_wfcq_init: initialize wait-free queue (without lock). Don't
 * pair with any destroy function.
 */
static inline void ___cds_wfcq_init(struct __cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
}

/*
 * cds_wfcq_empty: return whether wait-free queue is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 *
 * We perform the test on head->node.next to check if the queue is
 * possibly empty, but we confirm this by checking if the tail pointer
 * points to the head node because the tail pointer is the linearisation
 * point of the enqueuers. Just checking the head next pointer could
 * make a queue appear empty if an enqueuer is preempted for a long time
 * between xchg() and setting the previous node's next pointer.
 */
static inline bool _cds_wfcq_empty(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	/*
	 * The queue is empty if no node is pointed to by either
	 * head->node.next or tail->p. Even though the tail->p check is
	 * sufficient on its own to find out if the queue is empty, we
	 * first check head->node.next as a common case to ensure that
	 * dequeuers do not frequently access the enqueuers' tail->p
	 * cache line.
	 */
	return uatomic_load(&head->node.next, CMM_CONSUME) == NULL
		&& uatomic_load(&tail->p, CMM_CONSUME) == &head->node;
}
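
/*
 * Example (illustrative sketch): since no memory barrier is issued,
 * the result is only a hint; the queue may change concurrently right
 * after the check. The helper name is hypothetical.
 */
#if 0
#include <urcu/wfcqueue.h>

static bool queue_looks_empty(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return cds_wfcq_empty(cds_wfcq_head_cast(head), tail);
}
#endif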

static inline void _cds_wfcq_dequeue_lock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_lock(&head->lock);
	urcu_posix_assert(!ret);
}

static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_unlock(&head->lock);
	urcu_posix_assert(!ret);
}

static inline bool ___cds_wfcq_append(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_head,
		struct cds_wfcq_node *new_tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *old_tail;

	/*
	 * Implicit memory barrier before uatomic_xchg() orders earlier
	 * stores to data structure containing node and setting
	 * node->next to NULL before publication.
	 */
	old_tail = uatomic_xchg_mo(&tail->p, new_tail, CMM_SEQ_CST);

	/*
	 * Implicit memory barrier after uatomic_xchg() orders store to
	 * q->tail before store to old_tail->next.
	 *
	 * At this point, dequeuers see a NULL tail->p->next, which
	 * indicates that the queue is being appended to. The following
	 * store will append "node" to the queue from a dequeuer
	 * perspective.
	 */
	uatomic_store(&old_tail->next, new_head, CMM_RELEASE);

	/*
	 * Return false if queue was empty prior to adding the node,
	 * else return true.
	 */
	return old_tail != &head->node;
}

/*
 * cds_wfcq_enqueue: enqueue a node into a wait-free queue.
 *
 * Operations prior to enqueue are consistent with respect to dequeuing,
 * splicing and iterating.
 *
 * Returns false if the queue was empty prior to adding the node.
 * Returns true otherwise.
 */
static inline bool _cds_wfcq_enqueue(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_tail)
{
	cmm_emit_legacy_smp_mb();

	return ___cds_wfcq_append(head, tail, new_tail, new_tail);
}
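
/*
 * Example (illustrative sketch): enqueueing user data, in the style of
 * the liburcu examples. struct mynode and enqueue_value are
 * hypothetical; the embedded cds_wfcq_node provides the linkage.
 */
#if 0
#include <stdbool.h>
#include <stdlib.h>
#include <urcu/wfcqueue.h>

struct mynode {
	int value;			/* hypothetical payload */
	struct cds_wfcq_node node;	/* embedded linkage */
};

/* Returns false if the queue was empty before this enqueue. */
static bool enqueue_value(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int value)
{
	struct mynode *n = malloc(sizeof(*n));

	if (!n)
		abort();
	n->value = value;
	cds_wfcq_node_init(&n->node);
	return cds_wfcq_enqueue(head, tail, &n->node);
}
#endif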

/*
 * CDS_WFCQ_WAIT_SLEEP:
 *
 * By default, this sleeps for the given @msec milliseconds.
 * This is a macro which LGPL users may #define themselves before
 * including wfcqueue.h to override the default behavior (e.g.
 * to log a warning or perform other background work).
 */
#ifndef CDS_WFCQ_WAIT_SLEEP
#define CDS_WFCQ_WAIT_SLEEP(msec) ___cds_wfcq_wait_sleep(msec)
#endif

static inline void ___cds_wfcq_wait_sleep(int msec)
{
	(void) poll(NULL, 0, msec);
}
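
/*
 * Example (illustrative sketch): an LGPL user overriding the wait hook
 * before the first inclusion of urcu/wfcqueue.h, here to log before
 * falling back to the default poll()-based sleep. The logging is
 * hypothetical; only the #define-before-include mechanism is the API.
 */
#if 0
#include <stdio.h>

#define CDS_WFCQ_WAIT_SLEEP(msec)					\
	do {								\
		fprintf(stderr, "wfcqueue: backing off %d ms\n",	\
			(msec));					\
		___cds_wfcq_wait_sleep(msec);				\
	} while (0)

#include <urcu/wfcqueue.h>
#endif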

/*
 * ___cds_wfcq_busy_wait: adaptive busy-wait.
 *
 * Returns 1 if nonblocking and the caller needs to block, 0 otherwise.
 */
static inline bool
___cds_wfcq_busy_wait(int *attempt, int blocking)
{
	if (!blocking)
		return 1;
	if (++(*attempt) >= WFCQ_ADAPT_ATTEMPTS) {
		CDS_WFCQ_WAIT_SLEEP(WFCQ_WAIT);	/* Wait for 10 ms */
		*attempt = 0;
	} else {
		caa_cpu_relax();
	}
	return 0;
}

/*
 * Wait for the enqueuer to complete the enqueue, then return the next
 * node.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
{
	struct cds_wfcq_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping waiting for the enqueuer to complete
	 * the enqueue.
	 *
	 * Load node.next before loading node's content.
	 */
	while ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_WOULDBLOCK;
	}

	return next;
}

static inline struct cds_wfcq_node *
___cds_wfcq_first(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail))
		return NULL;
	node = ___cds_wfcq_node_sync_next(&head->node, blocking);

	return node;
}

/*
 * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if queue is empty, first node otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 1);
}
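
/*
 * Example (illustrative sketch): iterating without dequeuing, via the
 * __cds_wfcq_for_each_blocking() macro built on first/next. The caller
 * must exclude dequeue/splice as per the synchronization table;
 * struct mynode is the hypothetical type sketched earlier.
 */
#if 0
#include <urcu/wfcqueue.h>

static int sum_queue(struct cds_wfcq_head *head, struct cds_wfcq_tail *tail)
{
	struct cds_wfcq_node *qnode;
	int sum = 0;

	__cds_wfcq_for_each_blocking(cds_wfcq_head_cast(head), tail, qnode) {
		struct mynode *n = caa_container_of(qnode, struct mynode, node);

		sum += n->value;
	}
	return sum;
}
#endif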

/*
 * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 0);
}

static inline struct cds_wfcq_node *
___cds_wfcq_next(cds_wfcq_head_ptr_t head __attribute__((unused)),
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node,
		int blocking)
{
	struct cds_wfcq_node *next;

	/*
	 * Even though the following tail->p check is sufficient to find
	 * out if we reached the end of the queue, we first check
	 * node->next as a common case to ensure that iteration on nodes
	 * does not frequently access the enqueuers' tail->p cache line.
	 *
	 * Load node->next before loading next's content.
	 */
	if ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
		if (uatomic_load(&tail->p, CMM_RELAXED) == node)
			return NULL;
		next = ___cds_wfcq_node_sync_next(node, blocking);
	}

	return next;
}

/*
 * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if reached end of queue, non-NULL next queue node
 * otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 1);
}

/*
 * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 0);
}

static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int *state,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node, *next;

	if (state)
		*state = 0;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail)) {
		return NULL;
	}

	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	if (!blocking && node == CDS_WFCQ_WOULDBLOCK) {
		return CDS_WFCQ_WOULDBLOCK;
	}

	if ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
		/*
		 * @node is probably the only node in the queue.
		 * Try to move the tail to &q->head.
		 * q->head.next is set to NULL here, and stays
		 * NULL if the cmpxchg succeeds. Should the
		 * cmpxchg fail due to a concurrent enqueue,
		 * q->head.next will be set to the next node.
		 */
		_cds_wfcq_node_init_atomic(&head->node);
		if (uatomic_cmpxchg_mo(&tail->p, node, &head->node,
				CMM_SEQ_CST, CMM_SEQ_CST) == node) {
			if (state)
				*state |= CDS_WFCQ_STATE_LAST;
			cmm_emit_legacy_smp_mb();
			return node;
		}
		next = ___cds_wfcq_node_sync_next(node, blocking);
		/*
		 * In nonblocking mode, if we would need to block to
		 * get node's next, set the head next node pointer
		 * (currently NULL) back to its original value.
		 */
		if (!blocking && next == CDS_WFCQ_WOULDBLOCK) {
			uatomic_store(&head->node.next, node, CMM_RELAXED);
			return CDS_WFCQ_WOULDBLOCK;
		}
	}

	/*
	 * Move queue head forward.
	 */
	uatomic_store(&head->node.next, next, CMM_RELAXED);
	cmm_emit_legacy_smp_mb();

	return node;
}

/*
 * __cds_wfcq_dequeue_with_state_blocking: dequeue node from queue, with state.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * It is valid to reuse and free a dequeued node immediately.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 1);
}
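
/*
 * Example (illustrative sketch): using the state output to detect that
 * the dequeued node was the last one. The caller must hold the dequeue
 * lock or otherwise be the only dequeuer; pop_node is hypothetical.
 */
#if 0
#include <urcu/wfcqueue.h>

static struct cds_wfcq_node *
pop_node(struct cds_wfcq_head *head, struct cds_wfcq_tail *tail)
{
	struct cds_wfcq_node *qnode;
	int state;

	qnode = __cds_wfcq_dequeue_with_state_blocking(
			cds_wfcq_head_cast(head), tail, &state);
	if (qnode && (state & CDS_WFCQ_STATE_LAST)) {
		/* qnode was the last node: the queue is now empty. */
	}
	return qnode;
}
#endif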

/*
 * ___cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * __cds_wfcq_dequeue_with_state_nonblocking: dequeue node, with state.
 *
 * Same as __cds_wfcq_dequeue_with_state_blocking, but returns
 * CDS_WFCQ_WOULDBLOCK if it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 0);
}

/*
 * ___cds_wfcq_dequeue_nonblocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_nonblocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_nonblocking(head, tail, NULL);
}
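
/*
 * Example (illustrative sketch): a dequeue attempt that never sleeps.
 * CDS_WFCQ_WOULDBLOCK is returned while a concurrent enqueuer is
 * half-way through publishing a node; try_pop is hypothetical.
 */
#if 0
#include <urcu/wfcqueue.h>

static struct cds_wfcq_node *
try_pop(struct cds_wfcq_head *head, struct cds_wfcq_tail *tail)
{
	struct cds_wfcq_node *qnode;

	qnode = __cds_wfcq_dequeue_nonblocking(cds_wfcq_head_cast(head), tail);
	if (qnode == CDS_WFCQ_WOULDBLOCK)
		return NULL;	/* caller may retry later */
	return qnode;		/* NULL if the queue was empty */
}
#endif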

/*
 * __cds_wfcq_splice: enqueue all src_q nodes at the end of dest_q.
 *
 * Operations after splice are consistent with respect to enqueue.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice(
		cds_wfcq_head_ptr_t u_dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t u_src_q_head,
		struct cds_wfcq_tail *src_q_tail,
		int blocking)
{
	struct __cds_wfcq_head *dest_q_head = u_dest_q_head._h;
	struct __cds_wfcq_head *src_q_head = u_src_q_head._h;
	struct cds_wfcq_node *head, *tail;
	int attempt = 0;

	/*
	 * Initial emptiness check to speed up cases where queue is
	 * empty: only require loads to check if queue is empty.
	 */
	if (_cds_wfcq_empty(__cds_wfcq_head_cast(src_q_head), src_q_tail))
		return CDS_WFCQ_RET_SRC_EMPTY;

	for (;;) {
		/*
		 * Open-coded _cds_wfcq_empty() by testing result of
		 * uatomic_xchg, as well as tail pointer vs head node
		 * address.
		 */
		head = uatomic_xchg_mo(&src_q_head->node.next, NULL, CMM_SEQ_CST);
		if (head)
			break;	/* non-empty */
		if (uatomic_load(&src_q_tail->p, CMM_CONSUME) == &src_q_head->node)
			return CDS_WFCQ_RET_SRC_EMPTY;
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_RET_WOULDBLOCK;
	}

	/*
	 * Memory barrier implied before uatomic_xchg() orders store to
	 * src_q->head before store to src_q->tail. This is required by
	 * concurrent enqueue on src_q, which exchanges the tail before
	 * updating the previous tail's next pointer.
	 */
	cmm_emit_legacy_smp_mb();
	tail = uatomic_xchg_mo(&src_q_tail->p, &src_q_head->node, CMM_SEQ_CST);

	/*
	 * Append the spliced content of src_q into dest_q. Does not
	 * require mutual exclusion on dest_q (wait-free).
	 */
	if (___cds_wfcq_append(__cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			head, tail))
		return CDS_WFCQ_RET_DEST_NON_EMPTY;
	else
		return CDS_WFCQ_RET_DEST_EMPTY;
}

/*
 * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_blocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 1);
}

/*
 * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Same as __cds_wfcq_splice_blocking, but returns
 * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_nonblocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 0);
}

/*
 * cds_wfcq_dequeue_with_state_blocking: dequeue a node from a wait-free queue.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is
 * ensured.
 * It is valid to reuse and free a dequeued node immediately.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int *state)
{
	struct cds_wfcq_node *retval;

	_cds_wfcq_dequeue_lock(head, tail);
	retval = ___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_cast(head),
			tail, state);
	_cds_wfcq_dequeue_unlock(head, tail);
	return retval;
}

/*
 * cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return _cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is
 * ensured.
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
_cds_wfcq_splice_blocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	enum cds_wfcq_ret ret;

	_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
	ret = ___cds_wfcq_splice_blocking(cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			cds_wfcq_head_cast(src_q_head), src_q_tail);
	_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
	return ret;
}
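
/*
 * Example (illustrative sketch): draining one queue into another in
 * O(1) with the convenience locked splice, through the public
 * urcu/wfcqueue.h wrapper. drain_into is hypothetical.
 */
#if 0
#include <urcu/wfcqueue.h>

static void drain_into(struct cds_wfcq_head *dst_head,
		struct cds_wfcq_tail *dst_tail,
		struct cds_wfcq_head *src_head,
		struct cds_wfcq_tail *src_tail)
{
	enum cds_wfcq_ret ret;

	ret = cds_wfcq_splice_blocking(dst_head, dst_tail,
			src_head, src_tail);
	if (ret == CDS_WFCQ_RET_SRC_EMPTY)
		return;	/* nothing to move */
	/* Else: CDS_WFCQ_RET_DEST_EMPTY or CDS_WFCQ_RET_DEST_NON_EMPTY. */
}
#endif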

#ifdef __cplusplus
}
#endif

#endif /* _URCU_WFCQUEUE_STATIC_H */