Fix: add missing destroy functions to queues/stack APIs
[userspace-rcu.git] / urcu / static / wfstack.h
1 #ifndef _URCU_STATIC_WFSTACK_H
2 #define _URCU_STATIC_WFSTACK_H
3
4 /*
5 * urcu/static/wfstack.h
6 *
7 * Userspace RCU library - Stack with wait-free push, blocking traversal.
8 *
9 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
10 * linking dynamically with the userspace rcu library.
11 *
12 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * This library is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU Lesser General Public
16 * License as published by the Free Software Foundation; either
17 * version 2.1 of the License, or (at your option) any later version.
18 *
19 * This library is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * Lesser General Public License for more details.
23 *
24 * You should have received a copy of the GNU Lesser General Public
25 * License along with this library; if not, write to the Free Software
26 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29 #include <pthread.h>
30 #include <assert.h>
31 #include <poll.h>
32 #include <stdbool.h>
33 #include <urcu/compiler.h>
34 #include <urcu/uatomic.h>
35
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39
40 #define CDS_WFS_END ((void *) 0x1UL)
41 #define CDS_WFS_ADAPT_ATTEMPTS 10 /* Retry if being set */
42 #define CDS_WFS_WAIT 10 /* Wait 10 ms if being set */
43
44 /*
45 * Stack with wait-free push, blocking traversal.
46 *
47 * Stack implementing push, pop, pop_all operations, as well as iterator
48 * on the stack head returned by pop_all.
49 *
50 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty,
51 * cds_wfs_first.
52 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next,
53 * iteration on stack head returned by pop_all.
54 *
55 * Synchronization table:
56 *
57 * External synchronization techniques described in the API below is
58 * required between pairs marked with "X". No external synchronization
59 * required between pairs marked with "-".
60 *
61 * cds_wfs_push __cds_wfs_pop __cds_wfs_pop_all
62 * cds_wfs_push - - -
63 * __cds_wfs_pop - X X
64 * __cds_wfs_pop_all - X -
65 *
66 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
67 * synchronization.
68 */
69
/*
 * cds_wfs_node_init: initialize wait-free stack node.
 */
static inline
void _cds_wfs_node_init(struct cds_wfs_node *node)
{
	/*
	 * A NULL next marks a node that is not (yet) linked into a
	 * stack: _cds_wfs_push() asserts it, and dequeuers busy-wait on
	 * a NULL next while a concurrent push completes the link (see
	 * ___cds_wfs_node_sync_next).
	 */
	node->next = NULL;
}
78
79 /*
80 * cds_wfs_init: initialize wait-free stack. Pair with
81 * cds_wfs_destroy().
82 */
83 static inline
84 void _cds_wfs_init(struct cds_wfs_stack *s)
85 {
86 int ret;
87
88 s->head = CDS_WFS_END;
89 ret = pthread_mutex_init(&s->lock, NULL);
90 assert(!ret);
91 }
92
93 /*
94 * cds_wfs_destroy: destroy wait-free stack. Pair with
95 * cds_wfs_init().
96 */
97 static inline
98 void _cds_wfs_destroy(struct cds_wfs_stack *s)
99 {
100 int ret = pthread_mutex_destroy(&s->lock);
101 assert(!ret);
102 }
103
104 static inline bool ___cds_wfs_end(void *node)
105 {
106 return node == CDS_WFS_END;
107 }
108
109 /*
110 * cds_wfs_empty: return whether wait-free stack is empty.
111 *
112 * No memory barrier is issued. No mutual exclusion is required.
113 */
114 static inline bool _cds_wfs_empty(struct cds_wfs_stack *s)
115 {
116 return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
117 }
118
/*
 * cds_wfs_push: push a node into the stack.
 *
 * Issues a full memory barrier before push (implied by uatomic_xchg).
 * No mutual exclusion is required: push is wait-free.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
int _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
{
	struct cds_wfs_head *old_head, *new_head;

	/* A node with non-NULL next is already linked into a stack. */
	assert(node->next == NULL);
	new_head = caa_container_of(node, struct cds_wfs_head, node);
	/*
	 * uatomic_xchg() implicit memory barrier orders earlier stores
	 * to node (setting it to NULL) before publication.
	 */
	old_head = uatomic_xchg(&s->head, new_head);
	/*
	 * At this point, dequeuers see a NULL node->next, they should
	 * busy-wait until node->next is set to old_head.
	 */
	CMM_STORE_SHARED(node->next, &old_head->node);
	/* Non-zero iff the stack held at least one node before this push. */
	return !___cds_wfs_end(old_head);
}
147
/*
 * Waiting for push to complete enqueue and return the next node.
 *
 * A pusher publishes the new head (xchg) before linking it to the old
 * head, so a reader can transiently observe node->next == NULL. If
 * @blocking, busy-wait adaptively (spin, then back off to a 10ms
 * sleep) until the link is written; otherwise return
 * CDS_WFS_WOULDBLOCK immediately.
 */
static inline struct cds_wfs_node *
___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;
	int attempt = 0;

	/*
	 * Adaptative busy-looping waiting for push to complete.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (!blocking)
			return CDS_WFS_WOULDBLOCK;
		if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
			/* Spinning did not help: yield the CPU for a while. */
			poll(NULL, 0, CDS_WFS_WAIT); /* Wait for 10ms */
			attempt = 0;
		} else {
			caa_cpu_relax();
		}
	}

	return next;
}
173
/*
 * ___cds_wfs_pop: pop the top node off the stack. Common helper for
 * the blocking and nonblocking pop variants.
 *
 * Returns NULL if the stack is empty, CDS_WFS_WOULDBLOCK if
 * !@blocking and progress would require waiting, and the popped node
 * otherwise. If @state is non-NULL, *state is sampled atomically with
 * the pop: CDS_WFS_STATE_LAST is set when the popped node was the
 * last one on the stack. Callers must synchronize with other poppers
 * as documented for __cds_wfs_pop_blocking.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop(struct cds_wfs_stack *s, int *state, int blocking)
{
	struct cds_wfs_head *head, *new_head;
	struct cds_wfs_node *next;

	if (state)
		*state = 0;
	for (;;) {
		head = CMM_LOAD_SHARED(s->head);
		if (___cds_wfs_end(head)) {
			/* Empty stack. */
			return NULL;
		}
		/* Wait for a concurrent push to link head to its successor. */
		next = ___cds_wfs_node_sync_next(&head->node, blocking);
		if (!blocking && next == CDS_WFS_WOULDBLOCK) {
			return CDS_WFS_WOULDBLOCK;
		}
		new_head = caa_container_of(next, struct cds_wfs_head, node);
		if (uatomic_cmpxchg(&s->head, head, new_head) == head) {
			/* cmpxchg succeeded: head is ours. */
			if (state && ___cds_wfs_end(new_head))
				*state |= CDS_WFS_STATE_LAST;
			return &head->node;
		}
		if (!blocking) {
			/* Lost the cmpxchg race; caller asked not to retry. */
			return CDS_WFS_WOULDBLOCK;
		}
		/* busy-loop if head changed under us */
	}
}
204
/*
 * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_wfs_pop_blocking needs to be synchronized using one of the
 * following techniques:
 *
 * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical
 *    section. The caller must wait for a grace period to pass before
 *    freeing the returned node or modifying the cds_wfs_node structure.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
	struct cds_wfs_node *node;

	node = ___cds_wfs_pop(s, state, 1);
	return node;
}
229
230 static inline
231 struct cds_wfs_node *
232 ___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
233 {
234 return ___cds_wfs_pop_with_state_blocking(s, NULL);
235 }
236
/*
 * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_with_state_blocking, but returns
 * CDS_WFS_WOULDBLOCK if it needs to block.
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_nonblocking(struct cds_wfs_stack *s, int *state)
{
	struct cds_wfs_node *node;

	node = ___cds_wfs_pop(s, state, 0);
	return node;
}
251
252 /*
253 * __cds_wfs_pop_nonblocking: pop a node from the stack.
254 *
255 * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
256 * it needs to block.
257 */
258 static inline
259 struct cds_wfs_node *
260 ___cds_wfs_pop_nonblocking(struct cds_wfs_stack *s)
261 {
262 return ___cds_wfs_pop_with_state_nonblocking(s, NULL);
263 }
264
/*
 * __cds_wfs_pop_all: pop all nodes from a stack.
 *
 * __cds_wfs_pop_all does not require any synchronization with other
 * push, nor with other __cds_wfs_pop_all, but requires synchronization
 * matching the technique used to synchronize __cds_wfs_pop_blocking:
 *
 * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical
 *    section, both __cds_wfs_pop_blocking and cds_wfs_pop_all callers
 *    must wait for a grace period to pass before freeing the returned
 *    node or modifying the cds_wfs_node structure. However, no RCU
 *    read-side critical section is needed around __cds_wfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 *
 * Returns NULL if the stack was empty, the detached list head otherwise.
 */
static inline
struct cds_wfs_head *
___cds_wfs_pop_all(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *head;

	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_xchg() in cds_wfs_push. It
	 * ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barriers issued
	 * prior to each uatomic_cmpxchg (each of which writes to head)
	 * take care of ordering writes to each node before the full
	 * memory barrier after this uatomic_xchg().
	 */
	head = uatomic_xchg(&s->head, CDS_WFS_END);
	if (___cds_wfs_end(head))
		return NULL;
	return head;
}
303
304 /*
305 * cds_wfs_pop_lock: lock stack pop-protection mutex.
306 */
307 static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s)
308 {
309 int ret;
310
311 ret = pthread_mutex_lock(&s->lock);
312 assert(!ret);
313 }
314
315 /*
316 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
317 */
318 static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
319 {
320 int ret;
321
322 ret = pthread_mutex_unlock(&s->lock);
323 assert(!ret);
324 }
325
/*
 * Mutex-protected pop: call __cds_wfs_pop_with_state_blocking with the
 * internal pop mutex held, so callers need no external synchronization.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
	struct cds_wfs_node *node;

	_cds_wfs_pop_lock(s);
	node = ___cds_wfs_pop_with_state_blocking(s, state);
	_cds_wfs_pop_unlock(s);
	return node;
}
340
341 /*
342 * Call _cds_wfs_pop_with_state_blocking without saving any state.
343 */
344 static inline
345 struct cds_wfs_node *
346 _cds_wfs_pop_blocking(struct cds_wfs_stack *s)
347 {
348 return _cds_wfs_pop_with_state_blocking(s, NULL);
349 }
350
/*
 * Mutex-protected pop_all: call __cds_wfs_pop_all with the internal
 * pop mutex held, so callers need no external synchronization.
 */
static inline
struct cds_wfs_head *
_cds_wfs_pop_all_blocking(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *head;

	_cds_wfs_pop_lock(s);
	head = ___cds_wfs_pop_all(s);
	_cds_wfs_pop_unlock(s);
	return head;
}
365
366 /*
367 * cds_wfs_first: get first node of a popped stack.
368 *
369 * Content written into the node before enqueue is guaranteed to be
370 * consistent, but no other memory ordering is ensured.
371 *
372 * Used by for-like iteration macros in urcu/wfstack.h:
373 * cds_wfs_for_each_blocking()
374 * cds_wfs_for_each_blocking_safe()
375 *
376 * Returns NULL if popped stack is empty, top stack node otherwise.
377 */
378 static inline struct cds_wfs_node *
379 _cds_wfs_first(struct cds_wfs_head *head)
380 {
381 if (___cds_wfs_end(head))
382 return NULL;
383 return &head->node;
384 }
385
/*
 * ___cds_wfs_next: advance to the next node of a popped stack. Common
 * helper for the blocking and nonblocking next variants. Returns NULL
 * at end of list, CDS_WFS_WOULDBLOCK if !blocking and the link is not
 * yet published by a concurrent push.
 */
static inline struct cds_wfs_node *
___cds_wfs_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;

	next = ___cds_wfs_node_sync_next(node, blocking);
	/*
	 * CDS_WFS_WOULDBLOCK != CDS_WFS_END, so we can check for end
	 * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
	 * and still return CDS_WFS_WOULDBLOCK.
	 */
	if (___cds_wfs_end(next))
		return NULL;
	return next;
}
401
/*
 * cds_wfs_next_blocking: get next node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if reached end of popped stack, non-NULL next stack
 * node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
	struct cds_wfs_node *next;

	next = ___cds_wfs_next(node, 1);
	return next;
}
420
421
/*
 * cds_wfs_next_nonblocking: get next node of a popped stack.
 *
 * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
 * needs to block.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
	struct cds_wfs_node *next;

	next = ___cds_wfs_next(node, 0);
	return next;
}
433
434 #ifdef __cplusplus
435 }
436 #endif
437
438 #endif /* _URCU_STATIC_WFSTACK_H */
This page took 0.037386 seconds and 4 git commands to generate.