wfcqueue: implement C++ API based on function overloading
[urcu.git] / include / urcu / wfcqueue.h
#ifndef _URCU_WFCQUEUE_H
#define _URCU_WFCQUEUE_H

/*
 * urcu/wfcqueue.h
 *
 * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011-2012 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <stdbool.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Concurrent queue with wait-free enqueue/blocking dequeue.
 *
 * This queue has been designed and implemented collaboratively by
 * Mathieu Desnoyers and Lai Jiangshan. It is inspired by the
 * half-wait-free/half-blocking queue implementation done by Paul E.
 * McKenney.
 */

#define CDS_WFCQ_WOULDBLOCK ((struct cds_wfcq_node *) -1UL)

enum cds_wfcq_ret {
        CDS_WFCQ_RET_WOULDBLOCK = -1,
        CDS_WFCQ_RET_DEST_EMPTY = 0,
        CDS_WFCQ_RET_DEST_NON_EMPTY = 1,
        CDS_WFCQ_RET_SRC_EMPTY = 2,
};

enum cds_wfcq_state {
        CDS_WFCQ_STATE_LAST = (1U << 0),
};

struct cds_wfcq_node {
        struct cds_wfcq_node *next;
};

/*
 * Do not put head and tail on the same cache-line if concurrent
 * enqueue/dequeue are expected from many CPUs. This eliminates
 * false-sharing between enqueue and dequeue.
 */
struct __cds_wfcq_head {
        struct cds_wfcq_node node;
};

struct cds_wfcq_head {
        struct cds_wfcq_node node;
        pthread_mutex_t lock;
};

/*
 * In C, the transparent union allows calling functions that work on both
 * struct cds_wfcq_head and struct __cds_wfcq_head on any of those two
 * types.
 *
 * In C++, implement static inline wrappers using function overloading
 * to obtain an API similar to C.
 */
typedef union {
        struct __cds_wfcq_head *_h;
        struct cds_wfcq_head *h;
} caa_c_transparent_union cds_wfcq_head_ptr_t;

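/*
 * Illustrative usage sketch (not part of this header): thanks to the
 * transparent union in C, and to the overloads defined at the end of
 * this file in C++, functions taking a cds_wfcq_head_ptr_t (such as
 * cds_wfcq_empty(), declared below) accept a pointer to either head
 * type directly. The variable names are hypothetical.
 *
 *        struct cds_wfcq_head head;        // head with dequeue lock
 *        struct __cds_wfcq_head lf_head;   // head without dequeue lock
 *        struct cds_wfcq_tail tail, lf_tail;
 *
 *        cds_wfcq_init(&head, &tail);
 *        __cds_wfcq_init(&lf_head, &lf_tail);
 *        (void) cds_wfcq_empty(&head, &tail);
 *        (void) cds_wfcq_empty(&lf_head, &lf_tail);
 */
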
#ifndef __cplusplus
/*
 * This static inline is only present for compatibility with C++. It has
 * no effect in C.
 */
static inline struct __cds_wfcq_head *__cds_wfcq_head_cast(struct __cds_wfcq_head *head)
{
        return head;
}

/*
 * This static inline is only present for compatibility with C++. It has
 * no effect in C.
 */
static inline struct cds_wfcq_head *cds_wfcq_head_cast(struct cds_wfcq_head *head)
{
        return head;
}
#else /* #ifndef __cplusplus */

/*
 * This static inline is used by C++ function overloading. It is also
 * used internally in the static inline implementation of the API.
 */
static inline cds_wfcq_head_ptr_t __cds_wfcq_head_cast(struct __cds_wfcq_head *head)
{
        cds_wfcq_head_ptr_t ret = { ._h = head };
        return ret;
}

/*
 * This static inline is used by C++ function overloading. It is also
 * used internally in the static inline implementation of the API.
 */
static inline cds_wfcq_head_ptr_t cds_wfcq_head_cast(struct cds_wfcq_head *head)
{
        cds_wfcq_head_ptr_t ret = { .h = head };
        return ret;
}
#endif /* #else #ifndef __cplusplus */

struct cds_wfcq_tail {
        struct cds_wfcq_node *p;
};

#ifdef _LGPL_SOURCE

#include <urcu/static/wfcqueue.h>

#define cds_wfcq_node_init _cds_wfcq_node_init
#define cds_wfcq_init _cds_wfcq_init
#define __cds_wfcq_init ___cds_wfcq_init
#define cds_wfcq_destroy _cds_wfcq_destroy
#define cds_wfcq_empty _cds_wfcq_empty
#define cds_wfcq_enqueue _cds_wfcq_enqueue

/* Dequeue locking */
#define cds_wfcq_dequeue_lock _cds_wfcq_dequeue_lock
#define cds_wfcq_dequeue_unlock _cds_wfcq_dequeue_unlock

/* Locking performed within cds_wfcq calls. */
#define cds_wfcq_dequeue_blocking _cds_wfcq_dequeue_blocking
#define cds_wfcq_dequeue_with_state_blocking \
        _cds_wfcq_dequeue_with_state_blocking
#define cds_wfcq_splice_blocking _cds_wfcq_splice_blocking
#define cds_wfcq_first_blocking _cds_wfcq_first_blocking
#define cds_wfcq_next_blocking _cds_wfcq_next_blocking

/* Locking ensured by caller by holding cds_wfcq_dequeue_lock() */
#define __cds_wfcq_dequeue_blocking ___cds_wfcq_dequeue_blocking
#define __cds_wfcq_dequeue_with_state_blocking \
        ___cds_wfcq_dequeue_with_state_blocking
#define __cds_wfcq_splice_blocking ___cds_wfcq_splice_blocking
#define __cds_wfcq_first_blocking ___cds_wfcq_first_blocking
#define __cds_wfcq_next_blocking ___cds_wfcq_next_blocking

/*
 * Locking ensured by the caller by holding cds_wfcq_dequeue_lock().
 * Non-blocking: dequeue, first and next return CDS_WFCQ_WOULDBLOCK if
 * they need to block. splice returns CDS_WFCQ_RET_WOULDBLOCK if it
 * needs to block.
 */
#define __cds_wfcq_dequeue_nonblocking ___cds_wfcq_dequeue_nonblocking
#define __cds_wfcq_dequeue_with_state_nonblocking \
        ___cds_wfcq_dequeue_with_state_nonblocking
#define __cds_wfcq_splice_nonblocking ___cds_wfcq_splice_nonblocking
#define __cds_wfcq_first_nonblocking ___cds_wfcq_first_nonblocking
#define __cds_wfcq_next_nonblocking ___cds_wfcq_next_nonblocking

#else /* !_LGPL_SOURCE */

/*
 * Mutual exclusion of cds_wfcq_* / __cds_wfcq_* API
 *
 * Synchronization table:
 *
 * The external synchronization techniques described in the API below
 * are required between pairs marked with "X". No external
 * synchronization is required between pairs marked with "-".
 *
 * Legend:
 * [1] cds_wfcq_enqueue
 * [2] __cds_wfcq_splice (destination queue)
 * [3] __cds_wfcq_dequeue
 * [4] __cds_wfcq_splice (source queue)
 * [5] __cds_wfcq_first
 * [6] __cds_wfcq_next
 *
 *     [1] [2] [3] [4] [5] [6]
 * [1]  -   -   -   -   -   -
 * [2]  -   -   -   -   -   -
 * [3]  -   -   X   X   X   X
 * [4]  -   -   X   -   X   X
 * [5]  -   -   X   X   -   -
 * [6]  -   -   X   X   -   -
 *
 * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock().
 *
 * For convenience, cds_wfcq_dequeue_blocking() and
 * cds_wfcq_splice_blocking() hold the dequeue lock.
 *
 * Besides locking, mutual exclusion of dequeue, splice and iteration
 * can be ensured by performing all of those operations from a single
 * thread, without requiring any lock.
 */

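/*
 * Illustrative sketch (not part of this header): one way to provide the
 * mutual exclusion required by the table above is to hold the built-in
 * dequeue lock around dequeue/splice/iteration operations. The queue
 * variables are hypothetical.
 *
 *        struct cds_wfcq_head head;
 *        struct cds_wfcq_tail tail;
 *        struct cds_wfcq_node *node;
 *
 *        cds_wfcq_init(&head, &tail);
 *        ...
 *        cds_wfcq_dequeue_lock(&head, &tail);
 *        node = __cds_wfcq_dequeue_blocking(&head, &tail);
 *        cds_wfcq_dequeue_unlock(&head, &tail);
 */
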
/*
 * cds_wfcq_node_init: initialize wait-free queue node.
 */
extern void cds_wfcq_node_init(struct cds_wfcq_node *node);

/*
 * cds_wfcq_init: initialize wait-free queue. Pair with
 * cds_wfcq_destroy().
 */
extern void cds_wfcq_init(struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail);

/*
 * cds_wfcq_destroy: destroy wait-free queue. Pair with
 * cds_wfcq_init().
 */
extern void cds_wfcq_destroy(struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail);

/*
 * __cds_wfcq_init: initialize wait-free queue (without lock). Don't
 * pair with any destroy function.
 */
extern void __cds_wfcq_init(struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail);

/*
 * cds_wfcq_empty: return whether wait-free queue is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
extern bool cds_wfcq_empty(cds_wfcq_head_ptr_t head,
        struct cds_wfcq_tail *tail);

/*
 * cds_wfcq_dequeue_lock: take the dequeue mutual exclusion lock.
 */
extern void cds_wfcq_dequeue_lock(struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail);

/*
 * cds_wfcq_dequeue_unlock: release the dequeue mutual exclusion lock.
 */
extern void cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail);

/*
 * cds_wfcq_enqueue: enqueue a node into a wait-free queue.
 *
 * Issues a full memory barrier before enqueue. No mutual exclusion is
 * required.
 *
 * Returns false if the queue was empty prior to adding the node.
 * Returns true otherwise.
 */
extern bool cds_wfcq_enqueue(cds_wfcq_head_ptr_t head,
        struct cds_wfcq_tail *tail,
        struct cds_wfcq_node *node);

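/*
 * Usage sketch (illustrative only, not part of this header). The
 * "struct myelem" type and its fields are hypothetical; the pattern is
 * to embed a cds_wfcq_node in the user structure and enqueue that node.
 *
 *        struct myelem {
 *                int value;
 *                struct cds_wfcq_node qnode;
 *        };
 *
 *        static void enqueue_elem(struct cds_wfcq_head *head,
 *                        struct cds_wfcq_tail *tail, struct myelem *elem)
 *        {
 *                cds_wfcq_node_init(&elem->qnode);
 *                (void) cds_wfcq_enqueue(head, tail, &elem->qnode);
 *        }
 */
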
/*
 * cds_wfcq_dequeue_blocking: dequeue a node from a wait-free queue.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * It is valid to reuse and free a dequeued node immediately.
 * Mutual exclusion with other cds_wfcq_dequeue_blocking() callers and
 * with holders of the dequeue lock is ensured: the dequeue lock is
 * taken internally.
 */
extern struct cds_wfcq_node *cds_wfcq_dequeue_blocking(
        struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail);

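/*
 * Usage sketch (illustrative only, not part of this header), reusing
 * the hypothetical "struct myelem" from the enqueue example above:
 * caa_container_of() (from urcu/compiler.h) converts the dequeued node
 * back into the enclosing structure.
 *
 *        static struct myelem *dequeue_elem(struct cds_wfcq_head *head,
 *                        struct cds_wfcq_tail *tail)
 *        {
 *                struct cds_wfcq_node *node;
 *
 *                node = cds_wfcq_dequeue_blocking(head, tail);
 *                if (!node)
 *                        return NULL;        // queue was empty
 *                return caa_container_of(node, struct myelem, qnode);
 *        }
 */
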
/*
 * cds_wfcq_dequeue_with_state_blocking: dequeue with state.
 *
 * Same as cds_wfcq_dequeue_blocking(), but also stores into *state
 * whether the dequeued node was the last node of the queue
 * (CDS_WFCQ_STATE_LAST).
 */
extern struct cds_wfcq_node *cds_wfcq_dequeue_with_state_blocking(
        struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        int *state);

/*
 * cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Content written into each node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_dequeue_blocking() callers and with
 * holders of the dequeue lock of src_q is ensured: that lock is taken
 * internally.
 *
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue.
 */
extern enum cds_wfcq_ret cds_wfcq_splice_blocking(
        struct cds_wfcq_head *dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        struct cds_wfcq_head *src_q_head,
        struct cds_wfcq_tail *src_q_tail);

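/*
 * Usage sketch (illustrative only, not part of this header): move every
 * node of a source queue to the end of a destination queue. The
 * function and variable names are hypothetical.
 *
 *        static void flush_queue(struct cds_wfcq_head *dest_head,
 *                        struct cds_wfcq_tail *dest_tail,
 *                        struct cds_wfcq_head *src_head,
 *                        struct cds_wfcq_tail *src_tail)
 *        {
 *                enum cds_wfcq_ret ret;
 *
 *                ret = cds_wfcq_splice_blocking(dest_head, dest_tail,
 *                                src_head, src_tail);
 *                if (ret == CDS_WFCQ_RET_SRC_EMPTY)
 *                        return;        // nothing to move
 *                // the destination queue now holds the spliced nodes
 *        }
 */
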
/*
 * __cds_wfcq_dequeue_blocking: dequeue a node from a wait-free queue.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * It is valid to reuse and free a dequeued node immediately.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 */
extern struct cds_wfcq_node *__cds_wfcq_dequeue_blocking(
        cds_wfcq_head_ptr_t head,
        struct cds_wfcq_tail *tail);

/*
 * __cds_wfcq_dequeue_with_state_blocking: dequeue with state.
 *
 * Same as __cds_wfcq_dequeue_blocking(), but also stores into *state
 * whether the dequeued node was the last node of the queue
 * (CDS_WFCQ_STATE_LAST).
 */
extern struct cds_wfcq_node *__cds_wfcq_dequeue_with_state_blocking(
        cds_wfcq_head_ptr_t head,
        struct cds_wfcq_tail *tail,
        int *state);

/*
 * __cds_wfcq_dequeue_nonblocking: dequeue a node from a wait-free queue.
 *
 * Same as __cds_wfcq_dequeue_blocking, but returns CDS_WFCQ_WOULDBLOCK
 * if it needs to block.
 */
extern struct cds_wfcq_node *__cds_wfcq_dequeue_nonblocking(
        cds_wfcq_head_ptr_t head,
        struct cds_wfcq_tail *tail);

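/*
 * Usage sketch (illustrative only, not part of this header): the caller
 * is expected to hold the dequeue lock (or to be the only dequeuer) and
 * to check for CDS_WFCQ_WOULDBLOCK explicitly. The function name is
 * hypothetical.
 *
 *        static struct cds_wfcq_node *try_dequeue(struct cds_wfcq_head *head,
 *                        struct cds_wfcq_tail *tail)
 *        {
 *                struct cds_wfcq_node *node;
 *
 *                node = __cds_wfcq_dequeue_nonblocking(head, tail);
 *                if (node == CDS_WFCQ_WOULDBLOCK)
 *                        return NULL;        // concurrent enqueue in progress, retry later
 *                return node;                // NULL means the queue is empty
 *        }
 */
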
/*
 * __cds_wfcq_dequeue_with_state_nonblocking: dequeue with state.
 *
 * Same as __cds_wfcq_dequeue_nonblocking, but also stores into *state
 * whether the dequeued node was the last node of the queue
 * (CDS_WFCQ_STATE_LAST).
 */
extern struct cds_wfcq_node *__cds_wfcq_dequeue_with_state_nonblocking(
        cds_wfcq_head_ptr_t head,
        struct cds_wfcq_tail *tail,
        int *state);

/*
 * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
extern enum cds_wfcq_ret __cds_wfcq_splice_blocking(
        cds_wfcq_head_ptr_t dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        cds_wfcq_head_ptr_t src_q_head,
        struct cds_wfcq_tail *src_q_tail);

/*
 * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Same as __cds_wfcq_splice_blocking, but returns
 * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
 */
extern enum cds_wfcq_ret __cds_wfcq_splice_nonblocking(
        cds_wfcq_head_ptr_t dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        cds_wfcq_head_ptr_t src_q_head,
        struct cds_wfcq_tail *src_q_tail);

/*
 * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if queue is empty, first node otherwise.
 */
extern struct cds_wfcq_node *__cds_wfcq_first_blocking(
        cds_wfcq_head_ptr_t head,
        struct cds_wfcq_tail *tail);

/*
 * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
extern struct cds_wfcq_node *__cds_wfcq_first_nonblocking(
        cds_wfcq_head_ptr_t head,
        struct cds_wfcq_tail *tail);

/*
 * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if reached end of queue, non-NULL next queue node
 * otherwise.
 */
extern struct cds_wfcq_node *__cds_wfcq_next_blocking(
        cds_wfcq_head_ptr_t head,
        struct cds_wfcq_tail *tail,
        struct cds_wfcq_node *node);

/*
 * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
extern struct cds_wfcq_node *__cds_wfcq_next_nonblocking(
        cds_wfcq_head_ptr_t head,
        struct cds_wfcq_tail *tail,
        struct cds_wfcq_node *node);

#endif /* !_LGPL_SOURCE */

/*
 * __cds_wfcq_for_each_blocking: Iterate over all nodes in a queue,
 * without dequeuing them.
 * @head: head of the queue (struct cds_wfcq_head or __cds_wfcq_head pointer).
 * @tail: tail of the queue (struct cds_wfcq_tail pointer).
 * @node: iterator on the queue (struct cds_wfcq_node pointer).
 *
 * Content written into each node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 */
#define __cds_wfcq_for_each_blocking(head, tail, node) \
        for (node = __cds_wfcq_first_blocking(head, tail); \
                node != NULL; \
                node = __cds_wfcq_next_blocking(head, tail, node))

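/*
 * Usage sketch (illustrative only, not part of this header), with the
 * hypothetical "struct myelem" embedding a cds_wfcq_node as "qnode".
 * The dequeue lock provides the required mutual exclusion here.
 *
 *        struct cds_wfcq_node *node;
 *
 *        cds_wfcq_dequeue_lock(&head, &tail);
 *        __cds_wfcq_for_each_blocking(&head, &tail, node) {
 *                struct myelem *elem =
 *                        caa_container_of(node, struct myelem, qnode);
 *                // use elem; do not remove it from the queue here
 *        }
 *        cds_wfcq_dequeue_unlock(&head, &tail);
 */
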
/*
 * __cds_wfcq_for_each_blocking_safe: Iterate over all nodes in a queue,
 * without dequeuing them. Safe against deletion.
 * @head: head of the queue (struct cds_wfcq_head or __cds_wfcq_head pointer).
 * @tail: tail of the queue (struct cds_wfcq_tail pointer).
 * @node: iterator on the queue (struct cds_wfcq_node pointer).
 * @n: struct cds_wfcq_node pointer holding the next pointer (used
 *     internally).
 *
 * Content written into each node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 */
#define __cds_wfcq_for_each_blocking_safe(head, tail, node, n) \
        for (node = __cds_wfcq_first_blocking(head, tail), \
                        n = (node ? __cds_wfcq_next_blocking(head, tail, node) : NULL); \
                node != NULL; \
                node = n, n = (node ? __cds_wfcq_next_blocking(head, tail, node) : NULL))

#ifdef __cplusplus
}

/*
 * In C++, implement static inline wrappers using function overloading
 * to obtain an API similar to C.
 */

static inline bool cds_wfcq_empty(struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail)
{
        return cds_wfcq_empty(__cds_wfcq_head_cast(head), tail);
}

static inline bool cds_wfcq_empty(struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail)
{
        return cds_wfcq_empty(cds_wfcq_head_cast(head), tail);
}

static inline bool cds_wfcq_enqueue(struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        struct cds_wfcq_node *node)
{
        return cds_wfcq_enqueue(__cds_wfcq_head_cast(head), tail, node);
}

static inline bool cds_wfcq_enqueue(struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        struct cds_wfcq_node *node)
{
        return cds_wfcq_enqueue(cds_wfcq_head_cast(head), tail, node);
}

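/*
 * Usage sketch (illustrative only, not part of this header): in C++,
 * the overloads above and below let both head types be passed directly,
 * mirroring the C transparent-union API. The variable names are
 * hypothetical.
 *
 *        struct cds_wfcq_head head;
 *        struct __cds_wfcq_head lf_head;
 *        struct cds_wfcq_tail tail, lf_tail;
 *        struct cds_wfcq_node node;
 *
 *        cds_wfcq_init(&head, &tail);
 *        __cds_wfcq_init(&lf_head, &lf_tail);
 *        cds_wfcq_node_init(&node);
 *        (void) cds_wfcq_enqueue(&head, &tail, &node);      // cds_wfcq_head overload
 *        (void) cds_wfcq_empty(&lf_head, &lf_tail);         // __cds_wfcq_head overload
 */
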
static inline struct cds_wfcq_node *__cds_wfcq_dequeue_blocking(
        struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail)
{
        return __cds_wfcq_dequeue_blocking(__cds_wfcq_head_cast(head), tail);
}

static inline struct cds_wfcq_node *__cds_wfcq_dequeue_blocking(
        struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail)
{
        return __cds_wfcq_dequeue_blocking(cds_wfcq_head_cast(head), tail);
}

static inline struct cds_wfcq_node *__cds_wfcq_dequeue_with_state_blocking(
        struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        int *state)
{
        return __cds_wfcq_dequeue_with_state_blocking(__cds_wfcq_head_cast(head),
                        tail, state);
}

static inline struct cds_wfcq_node *__cds_wfcq_dequeue_with_state_blocking(
        struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        int *state)
{
        return __cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_cast(head),
                        tail, state);
}

static inline struct cds_wfcq_node *__cds_wfcq_dequeue_nonblocking(
        struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail)
{
        return __cds_wfcq_dequeue_nonblocking(__cds_wfcq_head_cast(head), tail);
}

static inline struct cds_wfcq_node *__cds_wfcq_dequeue_nonblocking(
        struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail)
{
        return __cds_wfcq_dequeue_nonblocking(cds_wfcq_head_cast(head), tail);
}

static inline struct cds_wfcq_node *__cds_wfcq_dequeue_with_state_nonblocking(
        struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        int *state)
{
        return __cds_wfcq_dequeue_with_state_nonblocking(__cds_wfcq_head_cast(head),
                        tail, state);
}

static inline struct cds_wfcq_node *__cds_wfcq_dequeue_with_state_nonblocking(
        struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        int *state)
{
        return __cds_wfcq_dequeue_with_state_nonblocking(cds_wfcq_head_cast(head),
                        tail, state);
}

/* Support the power set of type combinations. */
static inline enum cds_wfcq_ret __cds_wfcq_splice_blocking(
        struct __cds_wfcq_head *dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        struct __cds_wfcq_head *src_q_head,
        struct cds_wfcq_tail *src_q_tail)
{
        return __cds_wfcq_splice_blocking(__cds_wfcq_head_cast(dest_q_head),
                        dest_q_tail,
                        __cds_wfcq_head_cast(src_q_head),
                        src_q_tail);
}

static inline enum cds_wfcq_ret __cds_wfcq_splice_blocking(
        struct cds_wfcq_head *dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        struct __cds_wfcq_head *src_q_head,
        struct cds_wfcq_tail *src_q_tail)
{
        return __cds_wfcq_splice_blocking(cds_wfcq_head_cast(dest_q_head),
                        dest_q_tail,
                        __cds_wfcq_head_cast(src_q_head),
                        src_q_tail);
}

static inline enum cds_wfcq_ret __cds_wfcq_splice_blocking(
        struct __cds_wfcq_head *dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        struct cds_wfcq_head *src_q_head,
        struct cds_wfcq_tail *src_q_tail)
{
        return __cds_wfcq_splice_blocking(__cds_wfcq_head_cast(dest_q_head),
                        dest_q_tail,
                        cds_wfcq_head_cast(src_q_head),
                        src_q_tail);
}

static inline enum cds_wfcq_ret __cds_wfcq_splice_blocking(
        struct cds_wfcq_head *dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        struct cds_wfcq_head *src_q_head,
        struct cds_wfcq_tail *src_q_tail)
{
        return __cds_wfcq_splice_blocking(cds_wfcq_head_cast(dest_q_head),
                        dest_q_tail,
                        cds_wfcq_head_cast(src_q_head),
                        src_q_tail);
}

/* Support the power set of type combinations. */
static inline enum cds_wfcq_ret __cds_wfcq_splice_nonblocking(
        struct __cds_wfcq_head *dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        struct __cds_wfcq_head *src_q_head,
        struct cds_wfcq_tail *src_q_tail)
{
        return __cds_wfcq_splice_nonblocking(__cds_wfcq_head_cast(dest_q_head),
                        dest_q_tail,
                        __cds_wfcq_head_cast(src_q_head),
                        src_q_tail);
}

static inline enum cds_wfcq_ret __cds_wfcq_splice_nonblocking(
        struct cds_wfcq_head *dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        struct __cds_wfcq_head *src_q_head,
        struct cds_wfcq_tail *src_q_tail)
{
        return __cds_wfcq_splice_nonblocking(cds_wfcq_head_cast(dest_q_head),
                        dest_q_tail,
                        __cds_wfcq_head_cast(src_q_head),
                        src_q_tail);
}

static inline enum cds_wfcq_ret __cds_wfcq_splice_nonblocking(
        struct __cds_wfcq_head *dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        struct cds_wfcq_head *src_q_head,
        struct cds_wfcq_tail *src_q_tail)
{
        return __cds_wfcq_splice_nonblocking(__cds_wfcq_head_cast(dest_q_head),
                        dest_q_tail,
                        cds_wfcq_head_cast(src_q_head),
                        src_q_tail);
}

static inline enum cds_wfcq_ret __cds_wfcq_splice_nonblocking(
        struct cds_wfcq_head *dest_q_head,
        struct cds_wfcq_tail *dest_q_tail,
        struct cds_wfcq_head *src_q_head,
        struct cds_wfcq_tail *src_q_tail)
{
        return __cds_wfcq_splice_nonblocking(cds_wfcq_head_cast(dest_q_head),
                        dest_q_tail,
                        cds_wfcq_head_cast(src_q_head),
                        src_q_tail);
}

static inline struct cds_wfcq_node *__cds_wfcq_first_blocking(
        struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail)
{
        return __cds_wfcq_first_blocking(__cds_wfcq_head_cast(head), tail);
}

static inline struct cds_wfcq_node *__cds_wfcq_first_blocking(
        struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail)
{
        return __cds_wfcq_first_blocking(cds_wfcq_head_cast(head), tail);
}

static inline struct cds_wfcq_node *__cds_wfcq_first_nonblocking(
        struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail)
{
        return __cds_wfcq_first_nonblocking(__cds_wfcq_head_cast(head), tail);
}

static inline struct cds_wfcq_node *__cds_wfcq_first_nonblocking(
        struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail)
{
        return __cds_wfcq_first_nonblocking(cds_wfcq_head_cast(head), tail);
}

static inline struct cds_wfcq_node *__cds_wfcq_next_blocking(
        struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        struct cds_wfcq_node *node)
{
        return __cds_wfcq_next_blocking(__cds_wfcq_head_cast(head), tail, node);
}

static inline struct cds_wfcq_node *__cds_wfcq_next_blocking(
        struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        struct cds_wfcq_node *node)
{
        return __cds_wfcq_next_blocking(cds_wfcq_head_cast(head), tail, node);
}

static inline struct cds_wfcq_node *__cds_wfcq_next_nonblocking(
        struct __cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        struct cds_wfcq_node *node)
{
        return __cds_wfcq_next_nonblocking(__cds_wfcq_head_cast(head), tail, node);
}

static inline struct cds_wfcq_node *__cds_wfcq_next_nonblocking(
        struct cds_wfcq_head *head,
        struct cds_wfcq_tail *tail,
        struct cds_wfcq_node *node)
{
        return __cds_wfcq_next_nonblocking(cds_wfcq_head_cast(head), tail, node);
}

#endif

#endif /* _URCU_WFCQUEUE_H */