Public headers: use SPDX identifiers
[urcu.git] / include / urcu / static / rculfqueue.h
1 // SPDX-FileCopyrightText: 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
2 //
3 // SPDX-License-Identifier: LGPL-2.1-or-later
4
5 #ifndef _URCU_RCULFQUEUE_STATIC_H
6 #define _URCU_RCULFQUEUE_STATIC_H
7
8 /*
9 * Userspace RCU library - Lock-Free RCU Queue
10 *
11 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
12 * dynamically with the userspace rcu library.
13 */
14
#include <errno.h>
#include <stdlib.h>

#include <urcu-call-rcu.h>
#include <urcu/assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>
20
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24
/*
 * Dummy node wrapper: embeds the generic queue node plus the state
 * needed to reclaim the dummy after it is unlinked.
 */
struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;	/* embedded queue node; parent.dummy is set to 1 */
	struct rcu_head head;		/* used by free_dummy_cb for deferred reclaim */
	struct cds_lfq_queue_rcu *q;	/* owning queue; provides queue_call_rcu */
};
30
31 /*
32 * Lock-free RCU queue. Enqueue and dequeue operations hold a RCU read
33 * lock to deal with cmpxchg ABA problem. This queue is *not* circular:
34 * head points to the oldest node, tail points to the newest node.
35 * A dummy node is kept to ensure enqueue and dequeue can always proceed
36 * concurrently. Keeping a separate head and tail helps with large
37 * queues: enqueue and dequeue can proceed concurrently without
38 * wrestling for exclusive access to the same variables.
39 *
 * Dequeue retries if it detects that it would be dequeueing the last node
 * (it means a dummy node dequeue-requeue is in progress). This ensures
 * that there is always at least one node in the queue.
43 *
44 * In the dequeue operation, we internally reallocate the dummy node
45 * upon dequeue/requeue and use call_rcu to free the old one after a
46 * grace period.
47 */
48
49 static inline
50 struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
51 struct cds_lfq_node_rcu *next)
52 {
53 struct cds_lfq_node_rcu_dummy *dummy;
54
55 dummy = (struct cds_lfq_node_rcu_dummy *)
56 malloc(sizeof(struct cds_lfq_node_rcu_dummy));
57 urcu_posix_assert(dummy);
58 dummy->parent.next = next;
59 dummy->parent.dummy = 1;
60 dummy->q = q;
61 return &dummy->parent;
62 }
63
64 static inline
65 void free_dummy_cb(struct rcu_head *head)
66 {
67 struct cds_lfq_node_rcu_dummy *dummy =
68 caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
69 free(dummy);
70 }
71
72 static inline
73 void rcu_free_dummy(struct cds_lfq_node_rcu *node)
74 {
75 struct cds_lfq_node_rcu_dummy *dummy;
76
77 urcu_posix_assert(node->dummy);
78 dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
79 dummy->q->queue_call_rcu(&dummy->head, free_dummy_cb);
80 }
81
82 static inline
83 void free_dummy(struct cds_lfq_node_rcu *node)
84 {
85 struct cds_lfq_node_rcu_dummy *dummy;
86
87 urcu_posix_assert(node->dummy);
88 dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
89 free(dummy);
90 }
91
92 static inline
93 void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
94 {
95 node->next = NULL;
96 node->dummy = 0;
97 }
98
99 static inline
100 void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
101 void queue_call_rcu(struct rcu_head *head,
102 void (*func)(struct rcu_head *head)))
103 {
104 q->tail = make_dummy(q, NULL);
105 q->head = q->tail;
106 q->queue_call_rcu = queue_call_rcu;
107 }
108
109 /*
110 * The queue should be emptied before calling destroy.
111 *
112 * Return 0 on success, -EPERM if queue is not empty.
113 */
114 static inline
115 int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
116 {
117 struct cds_lfq_node_rcu *head;
118
119 head = rcu_dereference(q->head);
120 if (!(head->dummy && head->next == NULL))
121 return -EPERM; /* not empty */
122 free_dummy(head);
123 return 0;
124 }
125
/*
 * Append @node at the tail of the queue.
 *
 * Should be called under rcu read lock critical section.
 * @node must have been initialized with _cds_lfq_node_init_rcu()
 * (next == NULL), since the append cmpxchg publishes it as the new
 * last element.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
			  struct cds_lfq_node_rcu *node)
{
	/*
	 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
	 * node before publication.
	 */

	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		tail = rcu_dereference(q->tail);
		/* Try to link node after the observed tail (only if it is last). */
		next = uatomic_cmpxchg(&tail->next, NULL, node);
		if (next == NULL) {
			/*
			 * Tail was at the end of queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, node);
			return;
		} else {
			/*
			 * Failure to append to current tail.
			 * Help moving tail further and retry.
			 * The result of this helper cmpxchg is ignored:
			 * if it fails, someone else already advanced tail.
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, next);
			continue;
		}
	}
}
161
162 static inline
163 void enqueue_dummy(struct cds_lfq_queue_rcu *q)
164 {
165 struct cds_lfq_node_rcu *node;
166
167 /* We need to reallocate to protect from ABA. */
168 node = make_dummy(q, NULL);
169 _cds_lfq_enqueue_rcu(q, node);
170 }
171
/*
 * Remove and return the oldest user node, or NULL if the queue is
 * empty (i.e. only the dummy remains).
 *
 * Should be called under rcu read lock critical section.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfq_node_rcu structure.
 * Returns NULL if queue is empty.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next;

		head = rcu_dereference(q->head);
		next = rcu_dereference(head->next);
		/* A lone dummy with no successor means an empty queue. */
		if (head->dummy && next == NULL)
			return NULL; /* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking if the head next
		 * is NULL, which means we need to enqueue a dummy node
		 * before we can hope dequeuing anything.
		 */
		if (!next) {
			/* head is a user node that is last: requeue a dummy first. */
			enqueue_dummy(q);
			next = rcu_dereference(head->next);
		}
		/* Atomically advance head; lose the race -> start over. */
		if (uatomic_cmpxchg(&q->head, head, next) != head)
			continue; /* Concurrently pushed. */
		if (head->dummy) {
			/*
			 * We dequeued the dummy, not a user node.
			 * Free dummy after grace period.
			 */
			rcu_free_dummy(head);
			continue; /* try again */
		}
		return head;
	}
}
210
211 #ifdef __cplusplus
212 }
213 #endif
214
215 #endif /* _URCU_RCULFQUEUE_STATIC_H */
This page took 0.034999 seconds and 4 git commands to generate.