// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_STATIC_WFSTACK_H
#define _URCU_STATIC_WFSTACK_H

/*
 * Userspace RCU library - Stack with wait-free push, blocking traversal.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
 * linking dynamically with the userspace rcu library.
 */

#include <pthread.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/assert.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

#define CDS_WFS_END			((struct cds_wfs_head *) 0x1UL)
#define CDS_WFS_ADAPT_ATTEMPTS		10	/* Retry if being set */
#define CDS_WFS_WAIT			10	/* Wait 10 ms if being set */

/*
 * Stack with wait-free push, blocking traversal.
 *
 * Stack implementing push, pop, pop_all operations, as well as an
 * iterator on the stack head returned by pop_all.
 *
 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty,
 *                       cds_wfs_first.
 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next,
 *                      iteration on stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 *                      cds_wfs_push  __cds_wfs_pop  __cds_wfs_pop_all
 * cds_wfs_push               -             -                -
 * __cds_wfs_pop              -             X                X
 * __cds_wfs_pop_all          -             X                -
 *
 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
 * synchronization.
 */
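
/*
 * Illustrative usage sketch (not part of this header's API): a minimal
 * multi-producer/single-consumer setup built on the public wrappers
 * from urcu/wfstack.h. The names my_item, producer and consumer below
 * are hypothetical.
 *
 *	#include <stdlib.h>
 *	#include <urcu/wfstack.h>
 *
 *	struct my_item {
 *		int value;
 *		struct cds_wfs_node node;	// embedded stack node
 *	};
 *
 *	static struct cds_wfs_stack stack;	// cds_wfs_init(&stack) at startup
 *
 *	static void producer(int v)
 *	{
 *		struct my_item *item = malloc(sizeof(*item));
 *
 *		item->value = v;
 *		cds_wfs_node_init(&item->node);
 *		cds_wfs_push(&stack, &item->node);	// wait-free
 *	}
 *
 *	static void consumer(void)
 *	{
 *		struct cds_wfs_node *snode = cds_wfs_pop_blocking(&stack);
 *
 *		if (snode) {
 *			struct my_item *item = caa_container_of(snode,
 *					struct my_item, node);
 *			// Immediate free is safe here because all pops go
 *			// through the mutex-protected wrapper.
 *			free(item);
 *		}
 *	}
 */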

/*
 * cds_wfs_node_init: initialize wait-free stack node.
 */
static inline
void _cds_wfs_node_init(struct cds_wfs_node *node)
{
	node->next = NULL;
}

/*
 * __cds_wfs_init: initialize wait-free stack. Don't pair with
 * any destroy function.
 */
static inline void ___cds_wfs_init(struct __cds_wfs_stack *s)
{
	s->head = CDS_WFS_END;
}

/*
 * cds_wfs_init: initialize wait-free stack. Pair with
 * cds_wfs_destroy().
 */
static inline
void _cds_wfs_init(struct cds_wfs_stack *s)
{
	int ret;

	s->head = CDS_WFS_END;
	ret = pthread_mutex_init(&s->lock, NULL);
	urcu_posix_assert(!ret);
}

/*
 * cds_wfs_destroy: destroy wait-free stack. Pair with
 * cds_wfs_init().
 */
static inline
void _cds_wfs_destroy(struct cds_wfs_stack *s)
{
	int ret = pthread_mutex_destroy(&s->lock);
	urcu_posix_assert(!ret);
}

static inline bool ___cds_wfs_end(void *node)
{
	return node == CDS_WFS_END;
}

/*
 * cds_wfs_empty: return whether wait-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
{
	struct __cds_wfs_stack *s = u_stack._s;

	return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
}

/*
 * cds_wfs_push: push a node into the stack.
 *
 * Issues a full memory barrier before push. No mutual exclusion is
 * required.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
int _cds_wfs_push(cds_wfs_stack_ptr_t u_stack, struct cds_wfs_node *node)
{
	struct __cds_wfs_stack *s = u_stack._s;
	struct cds_wfs_head *old_head, *new_head;

	urcu_posix_assert(node->next == NULL);
	new_head = caa_container_of(node, struct cds_wfs_head, node);
	/*
	 * uatomic_xchg() implicit memory barrier orders earlier stores
	 * to node (setting node->next to NULL) before publication.
	 */
	old_head = uatomic_xchg(&s->head, new_head);
	/*
	 * At this point, dequeuers see a NULL node->next; they should
	 * busy-wait until node->next is set to old_head.
	 */
	CMM_STORE_SHARED(node->next, &old_head->node);
	return !___cds_wfs_end(old_head);
}
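
/*
 * Illustrative sketch (hypothetical notify_consumer() helper, not part
 * of the API): the return value of cds_wfs_push can serve as an
 * "empty -> non-empty" transition hint, e.g. to wake a single consumer
 * exactly once. The uatomic_xchg on head serializes concurrent pushers,
 * so only one of them observes the empty-stack return value.
 *
 *	static void push_and_notify(struct cds_wfs_stack *s,
 *			struct cds_wfs_node *node)
 *	{
 *		if (cds_wfs_push(s, node) == 0) {
 *			// Stack was empty before this push: wake the
 *			// consumer. Racing pushers see non-zero and
 *			// skip the wakeup.
 *			notify_consumer();
 *		}
 *	}
 */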

/*
 * Wait for a concurrent push to complete its enqueue, then return the
 * next node.
 */
static inline struct cds_wfs_node *
___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping waiting for the push to complete.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (!blocking)
			return CDS_WFS_WOULDBLOCK;
		if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
			(void) poll(NULL, 0, CDS_WFS_WAIT);	/* Wait for 10ms */
			attempt = 0;
		} else {
			caa_cpu_relax();
		}
	}

	return next;
}

static inline
struct cds_wfs_node *
___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack, int *state, int blocking)
{
	struct cds_wfs_head *head, *new_head;
	struct cds_wfs_node *next;
	struct __cds_wfs_stack *s = u_stack._s;

	if (state)
		*state = 0;
	for (;;) {
		head = CMM_LOAD_SHARED(s->head);
		if (___cds_wfs_end(head)) {
			return NULL;
		}
		next = ___cds_wfs_node_sync_next(&head->node, blocking);
		if (!blocking && next == CDS_WFS_WOULDBLOCK) {
			return CDS_WFS_WOULDBLOCK;
		}
		new_head = caa_container_of(next, struct cds_wfs_head, node);
		if (uatomic_cmpxchg(&s->head, head, new_head) == head) {
			if (state && ___cds_wfs_end(new_head))
				*state |= CDS_WFS_STATE_LAST;
			return &head->node;
		}
		if (!blocking) {
			return CDS_WFS_WOULDBLOCK;
		}
		/* busy-loop if head changed under us */
	}
}

/*
 * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_wfs_pop_blocking needs to be synchronized using one of the
 * following techniques:
 *
 * 1) Calling __cds_wfs_pop_blocking within a RCU read-side critical
 *    section. The caller must wait for a grace period to pass before
 *    freeing the returned node or modifying the cds_wfs_node structure.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_blocking(cds_wfs_stack_ptr_t u_stack, int *state)
{
	return ___cds_wfs_pop(u_stack, state, 1);
}

static inline
struct cds_wfs_node *
___cds_wfs_pop_blocking(cds_wfs_stack_ptr_t u_stack)
{
	return ___cds_wfs_pop_with_state_blocking(u_stack, NULL);
}
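
/*
 * Illustrative sketch of synchronization technique 1 above, assuming
 * the urcu-memb flavor; struct my_item and pop_item() are hypothetical.
 * The RCU read-side critical section protects concurrent poppers from
 * each other, and reclamation is deferred through call_rcu() so a
 * popper still busy-waiting on the node cannot see it freed.
 *
 *	#define _LGPL_SOURCE
 *	#include <stdlib.h>
 *	#include <urcu/urcu-memb.h>
 *
 *	struct my_item {
 *		int value;
 *		struct cds_wfs_node node;
 *		struct rcu_head rcu_head;	// for deferred reclaim
 *	};
 *
 *	static void free_item(struct rcu_head *head)
 *	{
 *		free(caa_container_of(head, struct my_item, rcu_head));
 *	}
 *
 *	static struct my_item *pop_item(struct __cds_wfs_stack *s)
 *	{
 *		struct cds_wfs_node *snode;
 *
 *		urcu_memb_read_lock();
 *		snode = __cds_wfs_pop_blocking(s);
 *		urcu_memb_read_unlock();
 *		if (!snode)
 *			return NULL;
 *		return caa_container_of(snode, struct my_item, node);
 *	}
 *
 *	// After use: urcu_memb_call_rcu(&item->rcu_head, free_item);
 */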

/*
 * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_with_state_blocking, but returns
 * CDS_WFS_WOULDBLOCK if it needs to block.
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_nonblocking(cds_wfs_stack_ptr_t u_stack, int *state)
{
	return ___cds_wfs_pop(u_stack, state, 0);
}

/*
 * __cds_wfs_pop_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
 * it needs to block.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_nonblocking(cds_wfs_stack_ptr_t u_stack)
{
	return ___cds_wfs_pop_with_state_nonblocking(u_stack, NULL);
}
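
/*
 * Illustrative sketch (hypothetical do_other_work() helper): a caller
 * of the nonblocking pop must distinguish three outcomes: a node, an
 * empty stack (NULL), and CDS_WFS_WOULDBLOCK when a concurrent push
 * has not yet completed its enqueue.
 *
 *	static struct cds_wfs_node *try_pop(struct __cds_wfs_stack *s)
 *	{
 *		for (;;) {
 *			struct cds_wfs_node *snode;
 *
 *			snode = __cds_wfs_pop_nonblocking(s);
 *			if (snode != CDS_WFS_WOULDBLOCK)
 *				return snode;	// node, or NULL if empty
 *			do_other_work();	// make progress elsewhere,
 *						// then retry
 *		}
 *	}
 */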

/*
 * __cds_wfs_pop_all: pop all nodes from a stack.
 *
 * __cds_wfs_pop_all does not require any synchronization with other
 * push, nor with other __cds_wfs_pop_all, but requires synchronization
 * matching the technique used to synchronize __cds_wfs_pop_blocking:
 *
 * 1) If __cds_wfs_pop_blocking is called within a RCU read-side
 *    critical section, both __cds_wfs_pop_blocking and cds_wfs_pop_all
 *    callers must wait for a grace period to pass before freeing the
 *    returned node or modifying the cds_wfs_node structure. However, no
 *    RCU read-side critical section is needed around __cds_wfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_wfs_head *
___cds_wfs_pop_all(cds_wfs_stack_ptr_t u_stack)
{
	struct __cds_wfs_stack *s = u_stack._s;
	struct cds_wfs_head *head;

	/*
	 * The implicit memory barrier after uatomic_xchg() matches the
	 * implicit memory barrier before uatomic_xchg() in cds_wfs_push.
	 * It ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barriers issued
	 * prior to each uatomic_cmpxchg, each of which writes to head,
	 * order the writes to each node before the full memory barrier
	 * that follows this uatomic_xchg().
	 */
	head = uatomic_xchg(&s->head, CDS_WFS_END);
	if (___cds_wfs_end(head))
		return NULL;
	return head;
}
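
/*
 * Illustrative sketch (struct my_item and drain() are hypothetical):
 * a single pop_all detaches the whole stack in one xchg, and the
 * resulting snapshot can then be traversed with the iteration macros
 * from urcu/wfstack.h without further synchronization against pushers.
 *
 *	static void drain(struct cds_wfs_stack *s)
 *	{
 *		struct cds_wfs_head *head;
 *		struct cds_wfs_node *node, *n;
 *
 *		head = cds_wfs_pop_all_blocking(s);	// mutex-protected wrapper
 *		cds_wfs_for_each_blocking_safe(head, node, n) {
 *			struct my_item *item =
 *				caa_container_of(node, struct my_item, node);
 *			// Safe to free: the iterator already fetched the
 *			// next pointer, and the pusher of this node has
 *			// finished publishing it.
 *			free(item);
 *		}
 *	}
 */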

/*
 * cds_wfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * Call __cds_wfs_pop_with_state_blocking with an internal pop mutex held.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
	struct cds_wfs_node *retnode;
	cds_wfs_stack_ptr_t stack;

	_cds_wfs_pop_lock(s);
	stack.s = s;
	retnode = ___cds_wfs_pop_with_state_blocking(stack, state);
	_cds_wfs_pop_unlock(s);
	return retnode;
}
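
/*
 * Illustrative sketch (hypothetical last_item_popped() hook): the
 * "state" out-parameter lets a consumer detect that its pop removed
 * the last node, i.e. that the pop emptied the stack, atomically with
 * the pop itself rather than via a separate, racy emptiness check.
 *
 *	static struct cds_wfs_node *pop_and_track(struct cds_wfs_stack *s)
 *	{
 *		int state;
 *		struct cds_wfs_node *snode;
 *
 *		snode = cds_wfs_pop_with_state_blocking(s, &state);
 *		if (snode && (state & CDS_WFS_STATE_LAST))
 *			last_item_popped();	// stack became empty here
 *		return snode;
 *	}
 */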

/*
 * Call _cds_wfs_pop_with_state_blocking without saving any state.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
	return _cds_wfs_pop_with_state_blocking(s, NULL);
}

/*
 * Call __cds_wfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_wfs_head *
_cds_wfs_pop_all_blocking(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *rethead;
	cds_wfs_stack_ptr_t stack;

	_cds_wfs_pop_lock(s);
	stack.s = s;
	rethead = ___cds_wfs_pop_all(stack);
	_cds_wfs_pop_unlock(s);
	return rethead;
}

/*
 * cds_wfs_first: get first node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if popped stack is empty, top stack node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_first(struct cds_wfs_head *head)
{
	if (___cds_wfs_end(head))
		return NULL;
	return &head->node;
}
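
/*
 * Illustrative sketch (struct my_item and process_item() are
 * hypothetical): an open-coded traversal equivalent to
 * cds_wfs_for_each_blocking(), for callers that prefer not to use the
 * macro.
 *
 *	static void visit_all(struct cds_wfs_head *head)
 *	{
 *		struct cds_wfs_node *node;
 *
 *		for (node = cds_wfs_first(head);
 *				node != NULL;
 *				node = cds_wfs_next_blocking(node)) {
 *			process_item(caa_container_of(node,
 *					struct my_item, node));
 *		}
 *	}
 */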

static inline struct cds_wfs_node *
___cds_wfs_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;

	next = ___cds_wfs_node_sync_next(node, blocking);
	/*
	 * CDS_WFS_WOULDBLOCK != CDS_WFS_END, so we can check for end
	 * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
	 * and still return CDS_WFS_WOULDBLOCK.
	 */
	if (___cds_wfs_end(next))
		return NULL;
	return next;
}

/*
 * cds_wfs_next_blocking: get next node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if reached end of popped stack, non-NULL next stack
 * node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 1);
}

/*
 * cds_wfs_next_nonblocking: get next node of a popped stack.
 *
 * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
 * needs to block.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 0);
}
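
/*
 * Illustrative sketch (hypothetical process_item() helper): a traversal
 * that never blocks, suspending itself when it catches up with a push
 * that has not yet completed its enqueue. It returns the node to
 * resume from later, or NULL once the whole snapshot has been visited.
 * A node is only processed after its next pointer has been obtained,
 * so no node is lost across suspend/resume.
 *
 *	static struct cds_wfs_node *visit_nonblocking(struct cds_wfs_node *node)
 *	{
 *		while (node != NULL) {
 *			struct cds_wfs_node *next;
 *
 *			next = cds_wfs_next_nonblocking(node);
 *			if (next == CDS_WFS_WOULDBLOCK)
 *				return node;	// resume from here later
 *			process_item(node);
 *			node = next;	// NULL at end of snapshot
 *		}
 *		return NULL;
 *	}
 */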

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_WFSTACK_H */