#ifndef _URCU_STATIC_WFSTACK_H
#define _URCU_STATIC_WFSTACK_H

/*
 * urcu/static/wfstack.h
 *
 * Userspace RCU library - Stack with wait-free push, blocking traversal.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
 * linking dynamically with the userspace rcu library.
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <assert.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

#define CDS_WFS_END			((void *) 0x1UL)
#define CDS_WFS_ADAPT_ATTEMPTS		10	/* Retry if being set */
#define CDS_WFS_WAIT			10	/* Wait 10 ms if being set */

/*
 * Stack with wait-free push, blocking traversal.
 *
 * Stack implementing push, pop, pop_all operations, as well as iterator
 * on the stack head returned by pop_all.
 *
 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty,
 * cds_wfs_first.
 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next,
 * iteration on stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * The external synchronization techniques described in the API below
 * are required between pairs marked with "X". No external
 * synchronization is required between pairs marked with "-".
 *
 *                      cds_wfs_push  __cds_wfs_pop  __cds_wfs_pop_all
 * cds_wfs_push               -              -                -
 * __cds_wfs_pop              -              X                X
 * __cds_wfs_pop_all          -              X                -
 *
 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
 * synchronization.
 */
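
/*
 * Minimal usage sketch (illustrative only, not part of this header),
 * using the public wrappers declared in urcu/wfstack.h and a
 * hypothetical user-defined "struct myitem" embedding a cds_wfs_node:
 *
 *	struct myitem {
 *		int value;
 *		struct cds_wfs_node node;
 *	};
 *
 *	struct cds_wfs_stack stack;
 *
 *	cds_wfs_init(&stack);
 *
 *	Producer (any thread, no external synchronization needed):
 *
 *	struct myitem *item = malloc(sizeof(*item));
 *	item->value = 42;
 *	cds_wfs_node_init(&item->node);
 *	cds_wfs_push(&stack, &item->node);
 *
 *	Consumer (synchronized by the internal pop mutex):
 *
 *	struct cds_wfs_node *snode = cds_wfs_pop_blocking(&stack);
 *	if (snode) {
 *		struct myitem *popped =
 *			caa_container_of(snode, struct myitem, node);
 *		...
 *	}
 *
 *	cds_wfs_destroy(&stack);
 */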

/*
 * cds_wfs_node_init: initialize wait-free stack node.
 */
static inline
void _cds_wfs_node_init(struct cds_wfs_node *node)
{
	node->next = NULL;
}

/*
 * __cds_wfs_init: initialize wait-free stack. Don't pair with
 * any destroy function.
 */
static inline void ___cds_wfs_init(struct __cds_wfs_stack *s)
{
	s->head = CDS_WFS_END;
}

/*
 * cds_wfs_init: initialize wait-free stack. Pair with
 * cds_wfs_destroy().
 */
static inline
void _cds_wfs_init(struct cds_wfs_stack *s)
{
	int ret;

	s->head = CDS_WFS_END;
	ret = pthread_mutex_init(&s->lock, NULL);
	assert(!ret);
}

/*
 * cds_wfs_destroy: destroy wait-free stack. Pair with
 * cds_wfs_init().
 */
static inline
void _cds_wfs_destroy(struct cds_wfs_stack *s)
{
	int ret = pthread_mutex_destroy(&s->lock);
	assert(!ret);
}

static inline bool ___cds_wfs_end(void *node)
{
	return node == CDS_WFS_END;
}

/*
 * cds_wfs_empty: return whether wait-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
{
	struct __cds_wfs_stack *s = u_stack._s;

	return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
}

/*
 * cds_wfs_push: push a node into the stack.
 *
 * Issues a full memory barrier before push. No mutual exclusion is
 * required.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
int _cds_wfs_push(cds_wfs_stack_ptr_t u_stack, struct cds_wfs_node *node)
{
	struct __cds_wfs_stack *s = u_stack._s;
	struct cds_wfs_head *old_head, *new_head;

	assert(node->next == NULL);
	new_head = caa_container_of(node, struct cds_wfs_head, node);
	/*
	 * uatomic_xchg() implicit memory barrier orders earlier stores
	 * to node (setting node->next to NULL) before publication.
	 */
	old_head = uatomic_xchg(&s->head, new_head);
	/*
	 * At this point, dequeuers see a NULL node->next; they
	 * busy-wait until node->next is set to old_head.
	 */
	CMM_STORE_SHARED(node->next, &old_head->node);
	return !___cds_wfs_end(old_head);
}
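
/*
 * Sketch of how the push return value can be used (hypothetical
 * caller code, not part of this API): because exactly one push
 * observes the empty-to-non-empty transition, a producer can issue
 * a single wakeup to a sleeping consumer:
 *
 *	cds_wfs_node_init(&item->node);
 *	if (!cds_wfs_push(&stack, &item->node))
 *		wake_consumer();
 *
 * "wake_consumer" is a hypothetical helper (e.g. a condition variable
 * signal).
 */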

/*
 * Wait for a push to complete its enqueue, then return the node
 * following "node".
 */
static inline struct cds_wfs_node *
___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping, waiting for the push to complete.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (!blocking)
			return CDS_WFS_WOULDBLOCK;
		if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
			(void) poll(NULL, 0, CDS_WFS_WAIT);	/* Wait for 10ms */
			attempt = 0;
		} else {
			caa_cpu_relax();
		}
	}

	return next;
}

static inline
struct cds_wfs_node *
___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack, int *state, int blocking)
{
	struct cds_wfs_head *head, *new_head;
	struct cds_wfs_node *next;
	struct __cds_wfs_stack *s = u_stack._s;

	if (state)
		*state = 0;
	for (;;) {
		head = CMM_LOAD_SHARED(s->head);
		if (___cds_wfs_end(head)) {
			return NULL;
		}
		next = ___cds_wfs_node_sync_next(&head->node, blocking);
		if (!blocking && next == CDS_WFS_WOULDBLOCK) {
			return CDS_WFS_WOULDBLOCK;
		}
		new_head = caa_container_of(next, struct cds_wfs_head, node);
		if (uatomic_cmpxchg(&s->head, head, new_head) == head) {
			if (state && ___cds_wfs_end(new_head))
				*state |= CDS_WFS_STATE_LAST;
			return &head->node;
		}
		if (!blocking) {
			return CDS_WFS_WOULDBLOCK;
		}
		/* busy-loop if head changed under us */
	}
}

/*
 * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_wfs_pop_blocking needs to be synchronized using one of the
 * following techniques:
 *
 * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical
 *    section. The caller must wait for a grace period to pass before
 *    freeing the returned node or modifying the cds_wfs_node structure.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all() (multi-provider/single-consumer scheme).
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_blocking(cds_wfs_stack_ptr_t u_stack, int *state)
{
	return ___cds_wfs_pop(u_stack, state, 1);
}

static inline
struct cds_wfs_node *
___cds_wfs_pop_blocking(cds_wfs_stack_ptr_t u_stack)
{
	return ___cds_wfs_pop_with_state_blocking(u_stack, NULL);
}
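
/*
 * Sketch of synchronization technique 1) above (hypothetical caller
 * code, using the RCU read-side API from urcu.h; "struct myitem"
 * embeds a cds_wfs_node):
 *
 *	struct cds_wfs_node *snode;
 *
 *	rcu_read_lock();
 *	snode = __cds_wfs_pop_blocking(&stack);
 *	rcu_read_unlock();
 *	if (snode) {
 *		synchronize_rcu();
 *		free(caa_container_of(snode, struct myitem, node));
 *	}
 *
 * synchronize_rcu() waits for a grace period before the node is
 * freed, as required by technique 1). call_rcu() could be used
 * instead to defer the free without blocking.
 */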

/*
 * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_with_state_blocking, but returns
 * CDS_WFS_WOULDBLOCK if it needs to block.
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_nonblocking(cds_wfs_stack_ptr_t u_stack, int *state)
{
	return ___cds_wfs_pop(u_stack, state, 0);
}

/*
 * __cds_wfs_pop_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
 * it needs to block.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_nonblocking(cds_wfs_stack_ptr_t u_stack)
{
	return ___cds_wfs_pop_with_state_nonblocking(u_stack, NULL);
}

/*
 * __cds_wfs_pop_all: pop all nodes from a stack.
 *
 * __cds_wfs_pop_all does not require any synchronization with push
 * operations, nor with other __cds_wfs_pop_all calls, but requires
 * synchronization matching the technique used to synchronize
 * __cds_wfs_pop_blocking:
 *
 * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical
 *    section, both __cds_wfs_pop_blocking and cds_wfs_pop_all callers
 *    must wait for a grace period to pass before freeing the returned
 *    node or modifying the cds_wfs_node structure. However, no RCU
 *    read-side critical section is needed around __cds_wfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all() (multi-provider/single-consumer scheme).
 */
static inline
struct cds_wfs_head *
___cds_wfs_pop_all(cds_wfs_stack_ptr_t u_stack)
{
	struct __cds_wfs_stack *s = u_stack._s;
	struct cds_wfs_head *head;

	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_xchg() in cds_wfs_push. It
	 * ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barriers issued
	 * before each uatomic_cmpxchg (each of which writes to head)
	 * order the writes to each node before the full memory barrier
	 * following this uatomic_xchg().
	 */
	head = uatomic_xchg(&s->head, CDS_WFS_END);
	if (___cds_wfs_end(head))
		return NULL;
	return head;
}
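
/*
 * Sketch of synchronization technique 3) above (hypothetical caller
 * code): with a single consumer thread, __cds_wfs_pop_all requires
 * neither lock nor RCU, and the returned list can be iterated with
 * the macros from urcu/wfstack.h:
 *
 *	struct cds_wfs_head *head;
 *	struct cds_wfs_node *node;
 *
 *	head = __cds_wfs_pop_all(&stack);
 *	cds_wfs_for_each_blocking(head, node)
 *		handle(node);
 *
 * "handle" is a hypothetical helper. Nodes must not be freed within
 * this loop, because the iterator reads node->next after the loop
 * body runs; use cds_wfs_for_each_blocking_safe for that.
 */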

/*
 * cds_wfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	assert(!ret);
}

/*
 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	assert(!ret);
}

/*
 * Call __cds_wfs_pop_with_state_blocking with an internal pop mutex held.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
	struct cds_wfs_node *retnode;

	_cds_wfs_pop_lock(s);
	retnode = ___cds_wfs_pop_with_state_blocking(s, state);
	_cds_wfs_pop_unlock(s);
	return retnode;
}

/*
 * Call _cds_wfs_pop_with_state_blocking without saving any state.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
	return _cds_wfs_pop_with_state_blocking(s, NULL);
}

/*
 * Call __cds_wfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_wfs_head *
_cds_wfs_pop_all_blocking(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *rethead;

	_cds_wfs_pop_lock(s);
	rethead = ___cds_wfs_pop_all(s);
	_cds_wfs_pop_unlock(s);
	return rethead;
}
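
/*
 * Sketch of emptying a stack and freeing its nodes (hypothetical
 * caller code; "struct myitem" embeds a cds_wfs_node, "process" is a
 * hypothetical helper):
 *
 *	struct cds_wfs_head *head;
 *	struct cds_wfs_node *node, *n;
 *
 *	head = cds_wfs_pop_all_blocking(&stack);
 *	cds_wfs_for_each_blocking_safe(head, node, n) {
 *		struct myitem *item =
 *			caa_container_of(node, struct myitem, node);
 *		process(item);
 *		free(item);
 *	}
 *
 * The "safe" variant samples the next pointer before handing the
 * current node to the loop body, so nodes may be freed during
 * iteration.
 */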

/*
 * cds_wfs_first: get first node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if popped stack is empty, top stack node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_first(struct cds_wfs_head *head)
{
	if (___cds_wfs_end(head))
		return NULL;
	return &head->node;
}

static inline struct cds_wfs_node *
___cds_wfs_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;

	next = ___cds_wfs_node_sync_next(node, blocking);
	/*
	 * CDS_WFS_WOULDBLOCK != CDS_WFS_END, so we can check for end
	 * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
	 * and still return CDS_WFS_WOULDBLOCK.
	 */
	if (___cds_wfs_end(next))
		return NULL;
	return next;
}

/*
 * cds_wfs_next_blocking: get next node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if reached end of popped stack, non-NULL next stack
 * node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 1);
}

/*
 * cds_wfs_next_nonblocking: get next node of a popped stack.
 *
 * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
 * needs to block.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 0);
}
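
/*
 * Sketch of a nonblocking traversal (hypothetical caller code): when
 * a concurrent push has published a node but not yet linked its next
 * pointer, cds_wfs_next_nonblocking() returns CDS_WFS_WOULDBLOCK and
 * the caller can back off instead of busy-waiting:
 *
 *	struct cds_wfs_node *node;
 *
 *	for (node = cds_wfs_first(head);
 *			node != NULL && node != CDS_WFS_WOULDBLOCK;
 *			node = cds_wfs_next_nonblocking(node))
 *		handle(node);
 *
 * If the loop exits with node == CDS_WFS_WOULDBLOCK, the caller may
 * retry later or fall back to cds_wfs_next_blocking(). "handle" is a
 * hypothetical helper.
 */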

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_WFSTACK_H */