Cleanup: cast poll delay return value to void
[urcu.git] / urcu / static / wfstack.h
CommitLineData
edac6b69
MD
1#ifndef _URCU_STATIC_WFSTACK_H
2#define _URCU_STATIC_WFSTACK_H
294d3396
MD
3
4/*
edac6b69 5 * urcu/static/wfstack.h
294d3396 6 *
edac6b69 7 * Userspace RCU library - Stack with with wait-free push, blocking traversal.
294d3396 8 *
07c2a4fd
MD
9 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
10 * linking dynamically with the userspace rcu library.
294d3396 11 *
a03a0f42 12 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
294d3396
MD
13 *
14 * This library is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU Lesser General Public
16 * License as published by the Free Software Foundation; either
17 * version 2.1 of the License, or (at your option) any later version.
18 *
19 * This library is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * Lesser General Public License for more details.
23 *
24 * You should have received a copy of the GNU Lesser General Public
25 * License along with this library; if not, write to the Free Software
26 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29#include <pthread.h>
30#include <assert.h>
b57aee66 31#include <poll.h>
edac6b69 32#include <stdbool.h>
294d3396 33#include <urcu/compiler.h>
a2e7bf9c 34#include <urcu/uatomic.h>
294d3396
MD
35
36#ifdef __cplusplus
37extern "C" {
38#endif
39
edac6b69 40#define CDS_WFS_END ((void *) 0x1UL)
16aa9ee8
DG
41#define CDS_WFS_ADAPT_ATTEMPTS 10 /* Retry if being set */
42#define CDS_WFS_WAIT 10 /* Wait 10 ms if being set */
294d3396 43
edac6b69
MD
44/*
45 * Stack with wait-free push, blocking traversal.
46 *
47 * Stack implementing push, pop, pop_all operations, as well as iterator
48 * on the stack head returned by pop_all.
49 *
c97c6ce5
MD
50 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty,
51 * cds_wfs_first.
52 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next,
53 * iteration on stack head returned by pop_all.
edac6b69
MD
54 *
55 * Synchronization table:
56 *
57 * External synchronization techniques described in the API below is
58 * required between pairs marked with "X". No external synchronization
59 * required between pairs marked with "-".
60 *
61 * cds_wfs_push __cds_wfs_pop __cds_wfs_pop_all
62 * cds_wfs_push - - -
63 * __cds_wfs_pop - X X
64 * __cds_wfs_pop_all - X -
65 *
66 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
67 * synchronization.
68 */
69
70/*
71 * cds_wfs_node_init: initialize wait-free stack node.
72 */
756a0322 73static inline
16aa9ee8 74void _cds_wfs_node_init(struct cds_wfs_node *node)
294d3396
MD
75{
76 node->next = NULL;
77}
78
718eb63e
EW
79/*
80 * __cds_wfs_init: initialize wait-free stack.
81 */
82static inline void ___cds_wfs_init(struct __cds_wfs_stack *s)
83{
84 s->head = CDS_WFS_END;
85}
86
edac6b69
MD
87/*
88 * cds_wfs_init: initialize wait-free stack.
89 */
756a0322 90static inline
16aa9ee8 91void _cds_wfs_init(struct cds_wfs_stack *s)
294d3396
MD
92{
93 int ret;
94
edac6b69 95 s->head = CDS_WFS_END;
294d3396
MD
96 ret = pthread_mutex_init(&s->lock, NULL);
97 assert(!ret);
98}
99
edac6b69
MD
100static inline bool ___cds_wfs_end(void *node)
101{
102 return node == CDS_WFS_END;
103}
104
191098fc 105/*
edac6b69
MD
106 * cds_wfs_empty: return whether wait-free stack is empty.
107 *
108 * No memory barrier is issued. No mutual exclusion is required.
109 */
718eb63e 110static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
edac6b69 111{
718eb63e
EW
112 struct __cds_wfs_stack *s = u_stack._s;
113
edac6b69
MD
114 return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
115}
116
117/*
118 * cds_wfs_push: push a node into the stack.
119 *
120 * Issues a full memory barrier before push. No mutual exclusion is
121 * required.
122 *
123 * Returns 0 if the stack was empty prior to adding the node.
124 * Returns non-zero otherwise.
191098fc 125 */
756a0322 126static inline
718eb63e 127int _cds_wfs_push(cds_wfs_stack_ptr_t u_stack, struct cds_wfs_node *node)
294d3396 128{
718eb63e 129 struct __cds_wfs_stack *s = u_stack._s;
edac6b69 130 struct cds_wfs_head *old_head, *new_head;
294d3396
MD
131
132 assert(node->next == NULL);
edac6b69 133 new_head = caa_container_of(node, struct cds_wfs_head, node);
294d3396 134 /*
edac6b69
MD
135 * uatomic_xchg() implicit memory barrier orders earlier stores
136 * to node (setting it to NULL) before publication.
294d3396 137 */
edac6b69 138 old_head = uatomic_xchg(&s->head, new_head);
294d3396 139 /*
edac6b69
MD
140 * At this point, dequeuers see a NULL node->next, they should
141 * busy-wait until node->next is set to old_head.
294d3396 142 */
edac6b69
MD
143 CMM_STORE_SHARED(node->next, &old_head->node);
144 return !___cds_wfs_end(old_head);
294d3396
MD
145}
146
147/*
edac6b69 148 * Waiting for push to complete enqueue and return the next node.
294d3396 149 */
edac6b69 150static inline struct cds_wfs_node *
af67624d 151___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
294d3396 152{
edac6b69 153 struct cds_wfs_node *next;
294d3396
MD
154 int attempt = 0;
155
294d3396
MD
156 /*
157 * Adaptative busy-looping waiting for push to complete.
158 */
edac6b69 159 while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
af67624d
MD
160 if (!blocking)
161 return CDS_WFS_WOULDBLOCK;
16aa9ee8
DG
162 if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
163 poll(NULL, 0, CDS_WFS_WAIT); /* Wait for 10ms */
294d3396 164 attempt = 0;
edac6b69 165 } else {
06f22bdb 166 caa_cpu_relax();
edac6b69 167 }
294d3396 168 }
edac6b69
MD
169
170 return next;
294d3396
MD
171}
172
af67624d
MD
173static inline
174struct cds_wfs_node *
711ff0f9 175___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack, int *state, int blocking)
af67624d
MD
176{
177 struct cds_wfs_head *head, *new_head;
178 struct cds_wfs_node *next;
711ff0f9 179 struct __cds_wfs_stack *s = u_stack._s;
af67624d 180
c8975b94
MD
181 if (state)
182 *state = 0;
af67624d
MD
183 for (;;) {
184 head = CMM_LOAD_SHARED(s->head);
c8975b94 185 if (___cds_wfs_end(head)) {
af67624d 186 return NULL;
c8975b94 187 }
af67624d 188 next = ___cds_wfs_node_sync_next(&head->node, blocking);
c8975b94 189 if (!blocking && next == CDS_WFS_WOULDBLOCK) {
af67624d 190 return CDS_WFS_WOULDBLOCK;
c8975b94 191 }
af67624d 192 new_head = caa_container_of(next, struct cds_wfs_head, node);
c8975b94
MD
193 if (uatomic_cmpxchg(&s->head, head, new_head) == head) {
194 if (state && ___cds_wfs_end(new_head))
195 *state |= CDS_WFS_STATE_LAST;
af67624d 196 return &head->node;
c8975b94
MD
197 }
198 if (!blocking) {
af67624d 199 return CDS_WFS_WOULDBLOCK;
c8975b94 200 }
af67624d
MD
201 /* busy-loop if head changed under us */
202 }
203}
204
edac6b69 205/*
c8975b94 206 * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state.
edac6b69
MD
207 *
208 * Returns NULL if stack is empty.
209 *
210 * __cds_wfs_pop_blocking needs to be synchronized using one of the
211 * following techniques:
212 *
213 * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical
214 * section. The caller must wait for a grace period to pass before
215 * freeing the returned node or modifying the cds_wfs_node structure.
216 * 2) Using mutual exclusion (e.g. mutexes) to protect
217 * __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
218 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
219 * and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
c8975b94
MD
220 *
221 * "state" saves state flags atomically sampled with pop operation.
edac6b69 222 */
c8975b94
MD
223static inline
224struct cds_wfs_node *
711ff0f9 225___cds_wfs_pop_with_state_blocking(cds_wfs_stack_ptr_t u_stack, int *state)
c8975b94 226{
711ff0f9 227 return ___cds_wfs_pop(u_stack, state, 1);
c8975b94
MD
228}
229
756a0322 230static inline
16aa9ee8 231struct cds_wfs_node *
711ff0f9 232___cds_wfs_pop_blocking(cds_wfs_stack_ptr_t u_stack)
edac6b69 233{
711ff0f9 234 return ___cds_wfs_pop_with_state_blocking(u_stack, NULL);
c8975b94
MD
235}
236
237/*
238 * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack.
239 *
240 * Same as __cds_wfs_pop_with_state_blocking, but returns
241 * CDS_WFS_WOULDBLOCK if it needs to block.
242 *
243 * "state" saves state flags atomically sampled with pop operation.
244 */
245static inline
246struct cds_wfs_node *
711ff0f9 247___cds_wfs_pop_with_state_nonblocking(cds_wfs_stack_ptr_t u_stack, int *state)
c8975b94 248{
711ff0f9 249 return ___cds_wfs_pop(u_stack, state, 0);
af67624d 250}
edac6b69 251
af67624d
MD
252/*
253 * __cds_wfs_pop_nonblocking: pop a node from the stack.
254 *
255 * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
256 * it needs to block.
257 */
258static inline
259struct cds_wfs_node *
711ff0f9 260___cds_wfs_pop_nonblocking(cds_wfs_stack_ptr_t u_stack)
af67624d 261{
711ff0f9 262 return ___cds_wfs_pop_with_state_nonblocking(u_stack, NULL);
edac6b69
MD
263}
264
265/*
266 * __cds_wfs_pop_all: pop all nodes from a stack.
267 *
268 * __cds_wfs_pop_all does not require any synchronization with other
269 * push, nor with other __cds_wfs_pop_all, but requires synchronization
270 * matching the technique used to synchronize __cds_wfs_pop_blocking:
271 *
272 * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical
273 * section, both __cds_wfs_pop_blocking and cds_wfs_pop_all callers
274 * must wait for a grace period to pass before freeing the returned
275 * node or modifying the cds_wfs_node structure. However, no RCU
276 * read-side critical section is needed around __cds_wfs_pop_all.
277 * 2) Using mutual exclusion (e.g. mutexes) to protect
278 * __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
279 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
280 * and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
281 */
282static inline
283struct cds_wfs_head *
718eb63e 284___cds_wfs_pop_all(cds_wfs_stack_ptr_t u_stack)
edac6b69 285{
718eb63e 286 struct __cds_wfs_stack *s = u_stack._s;
edac6b69
MD
287 struct cds_wfs_head *head;
288
289 /*
290 * Implicit memory barrier after uatomic_xchg() matches implicit
291 * memory barrier before uatomic_xchg() in cds_wfs_push. It
292 * ensures that all nodes of the returned list are consistent.
293 * There is no need to issue memory barriers when iterating on
294 * the returned list, because the full memory barrier issued
295 * prior to each uatomic_cmpxchg, which each write to head, are
296 * taking care to order writes to each node prior to the full
297 * memory barrier after this uatomic_xchg().
298 */
299 head = uatomic_xchg(&s->head, CDS_WFS_END);
300 if (___cds_wfs_end(head))
301 return NULL;
302 return head;
303}
304
305/*
306 * cds_wfs_pop_lock: lock stack pop-protection mutex.
307 */
308static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s)
294d3396 309{
294d3396
MD
310 int ret;
311
312 ret = pthread_mutex_lock(&s->lock);
313 assert(!ret);
edac6b69
MD
314}
315
316/*
317 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
318 */
319static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
320{
321 int ret;
322
294d3396
MD
323 ret = pthread_mutex_unlock(&s->lock);
324 assert(!ret);
edac6b69
MD
325}
326
/*
 * Call __cds_wfs_pop_with_state_blocking with an internal pop mutex held.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
	struct cds_wfs_node *retnode;

	_cds_wfs_pop_lock(s);
	retnode = ___cds_wfs_pop_with_state_blocking(s, state);
	_cds_wfs_pop_unlock(s);
	return retnode;
}
341
c8975b94
MD
342/*
343 * Call _cds_wfs_pop_with_state_blocking without saving any state.
344 */
345static inline
346struct cds_wfs_node *
347_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
348{
349 return _cds_wfs_pop_with_state_blocking(s, NULL);
350}
351
edac6b69
MD
/*
 * Call __cds_wfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_wfs_head *
_cds_wfs_pop_all_blocking(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *rethead;

	_cds_wfs_pop_lock(s);
	rethead = ___cds_wfs_pop_all(s);
	_cds_wfs_pop_unlock(s);
	return rethead;
}
366
367/*
c7ba06ba 368 * cds_wfs_first: get first node of a popped stack.
edac6b69
MD
369 *
370 * Content written into the node before enqueue is guaranteed to be
371 * consistent, but no other memory ordering is ensured.
372 *
373 * Used by for-like iteration macros in urcu/wfstack.h:
374 * cds_wfs_for_each_blocking()
375 * cds_wfs_for_each_blocking_safe()
8af2956c
MD
376 *
377 * Returns NULL if popped stack is empty, top stack node otherwise.
edac6b69
MD
378 */
379static inline struct cds_wfs_node *
c7ba06ba 380_cds_wfs_first(struct cds_wfs_head *head)
edac6b69
MD
381{
382 if (___cds_wfs_end(head))
383 return NULL;
384 return &head->node;
385}
386
af67624d
MD
387static inline struct cds_wfs_node *
388___cds_wfs_next(struct cds_wfs_node *node, int blocking)
389{
390 struct cds_wfs_node *next;
391
392 next = ___cds_wfs_node_sync_next(node, blocking);
393 /*
394 * CDS_WFS_WOULDBLOCK != CSD_WFS_END, so we can check for end
395 * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
396 * and still return CDS_WFS_WOULDBLOCK.
397 */
398 if (___cds_wfs_end(next))
399 return NULL;
400 return next;
401}
402
edac6b69
MD
/*
 * cds_wfs_next_blocking: get next node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if reached end of popped stack, non-NULL next stack
 * node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 1);
}
edac6b69 421
af67624d
MD
422
/*
 * cds_wfs_next_nonblocking: get next node of a popped stack.
 *
 * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
 * needs to block.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 0);
}
434
294d3396
MD
435#ifdef __cplusplus
436}
437#endif
438
edac6b69 439#endif /* _URCU_STATIC_WFSTACK_H */
This page took 0.048126 seconds and 4 git commands to generate.