// SPDX-FileCopyrightText: 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_WFQUEUE_STATIC_H
#define _URCU_WFQUEUE_STATIC_H

/*
 * Userspace RCU library - Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfqueue.h for linking
 * dynamically with the userspace rcu library.
 */

#include <pthread.h>
#include <poll.h>
#include <urcu/assert.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Queue with wait-free enqueue/blocking dequeue.
 * This implementation adds a dummy head node when the queue is empty to ensure
 * we can always update the queue locklessly.
 *
 * Inspired by the half-wait-free/half-blocking queue implementation by
 * Paul E. McKenney.
 */
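
/*
 * Illustrative usage sketch (not part of this header): callers normally
 * embed a struct cds_wfq_node in their own structure and use the
 * non-underscore wrappers declared in urcu/wfqueue.h. The names
 * "struct mydata", "qnode" and "value" below are hypothetical.
 *
 *	struct mydata {
 *		int value;
 *		struct cds_wfq_node qnode;
 *	};
 *
 *	struct cds_wfq_queue q;
 *	struct mydata *d = malloc(sizeof(*d));
 *
 *	cds_wfq_init(&q);
 *	cds_wfq_node_init(&d->qnode);
 *	d->value = 42;
 *	cds_wfq_enqueue(&q, &d->qnode);
 *
 * The enqueue path is wait-free and may be called concurrently from any
 * number of threads.
 */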

#define WFQ_ADAPT_ATTEMPTS	10	/* Busy-loop attempts before blocking */
#define WFQ_WAIT		10	/* Block 10 ms while waiting for the enqueuer */

static inline void _cds_wfq_node_init(struct cds_wfq_node *node)
{
	node->next = NULL;
}

static inline void _cds_wfq_init(struct cds_wfq_queue *q)
{
	int ret;

	_cds_wfq_node_init(&q->dummy);
	/* Set queue head and tail */
	q->head = &q->dummy;
	q->tail = &q->dummy.next;
	ret = pthread_mutex_init(&q->lock, NULL);
	urcu_posix_assert(!ret);
}

static inline void _cds_wfq_destroy(struct cds_wfq_queue *q)
{
	int ret = pthread_mutex_destroy(&q->lock);
	urcu_posix_assert(!ret);
}

static inline void _cds_wfq_enqueue(struct cds_wfq_queue *q,
		struct cds_wfq_node *node)
{
	struct cds_wfq_node **old_tail;

	/*
	 * The implicit memory barrier in uatomic_xchg() orders earlier
	 * stores to the data structure containing the node, and the store
	 * setting node->next to NULL, before publication.
	 */
	old_tail = uatomic_xchg(&q->tail, &node->next);
	/*
	 * At this point, dequeuers see a NULL old_tail->next, which indicates
	 * that the queue is being appended to. The following store will append
	 * "node" to the queue from a dequeuer perspective.
	 */
	CMM_STORE_SHARED(*old_tail, node);
}
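
/*
 * Enqueue publication, step by step, as observed by a concurrent dequeuer
 * (descriptive sketch of the function above):
 *
 * 1) uatomic_xchg() atomically swings q->tail to &node->next. From this
 *    point on, later enqueuers append after "node", but old_tail->next is
 *    still NULL, so a dequeuer reaching it spins in
 *    ___cds_wfq_node_sync_next() below.
 * 2) CMM_STORE_SHARED(*old_tail, node) publishes the node; the spinning
 *    dequeuer then observes a non-NULL next pointer and proceeds.
 */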

/*
 * Wait for the enqueuer to complete the enqueue, then return the next node.
 */
static inline struct cds_wfq_node *
___cds_wfq_node_sync_next(struct cds_wfq_node *node)
{
	struct cds_wfq_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping waiting for the enqueuer to complete the
	 * enqueue.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
			(void) poll(NULL, 0, WFQ_WAIT);	/* Wait for 10 ms */
			attempt = 0;
		} else
			caa_cpu_relax();
	}

	return next;
}

/*
 * It is valid to reuse and free a dequeued node immediately.
 *
 * No need to go on a waitqueue here, as there is no possible state in which the
 * list could cause dequeue to busy-loop needlessly while waiting for another
 * thread to be scheduled. The queue appears empty until tail->next is set by
 * enqueue.
 */
static inline struct cds_wfq_node *
___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
{
	struct cds_wfq_node *node, *next;

	/*
	 * Queue is empty if it only contains the dummy node.
	 */
	if (q->head == &q->dummy && CMM_LOAD_SHARED(q->tail) == &q->dummy.next)
		return NULL;
	node = q->head;

	next = ___cds_wfq_node_sync_next(node);

	/*
	 * Move queue head forward.
	 */
	q->head = next;
	/*
	 * Requeue dummy node if we just dequeued it.
	 */
	if (node == &q->dummy) {
		_cds_wfq_node_init(node);
		_cds_wfq_enqueue(q, node);
		return ___cds_wfq_dequeue_blocking(q);
	}
	return node;
}

static inline struct cds_wfq_node *
_cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
{
	struct cds_wfq_node *retnode;
	int ret;

	ret = pthread_mutex_lock(&q->lock);
	urcu_posix_assert(!ret);
	retnode = ___cds_wfq_dequeue_blocking(q);
	ret = pthread_mutex_unlock(&q->lock);
	urcu_posix_assert(!ret);
	return retnode;
}
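
/*
 * Illustrative consumer sketch (not part of this header): dequeued nodes
 * may be reused or freed immediately. The "struct mydata", "qnode" and
 * "process" names below are hypothetical; "q" is a queue initialized as in
 * the usage sketch near the top of this file.
 *
 *	struct cds_wfq_node *node;
 *	struct mydata *d;
 *
 *	while ((node = cds_wfq_dequeue_blocking(&q)) != NULL) {
 *		d = caa_container_of(node, struct mydata, qnode);
 *		process(d->value);
 *		free(d);
 *	}
 *
 * The wrapper above serializes concurrent dequeuers with q->lock; the
 * enqueue side never takes the lock.
 */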

#ifdef __cplusplus
}
#endif

#endif /* _URCU_WFQUEUE_STATIC_H */