wfstack: implement nonblocking pop and next
[urcu.git] / urcu / static / wfstack.h
1 #ifndef _URCU_STATIC_WFSTACK_H
2 #define _URCU_STATIC_WFSTACK_H
3
4 /*
5 * urcu/static/wfstack.h
6 *
 7  * Userspace RCU library - Stack with wait-free push, blocking traversal.
8 *
9 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
10 * linking dynamically with the userspace rcu library.
11 *
12 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * This library is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU Lesser General Public
16 * License as published by the Free Software Foundation; either
17 * version 2.1 of the License, or (at your option) any later version.
18 *
19 * This library is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * Lesser General Public License for more details.
23 *
24 * You should have received a copy of the GNU Lesser General Public
25 * License along with this library; if not, write to the Free Software
26 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29 #include <pthread.h>
30 #include <assert.h>
31 #include <poll.h>
32 #include <stdbool.h>
33 #include <urcu/compiler.h>
34 #include <urcu/uatomic.h>
35
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39
40 #define CDS_WFS_END ((void *) 0x1UL)
41 #define CDS_WFS_ADAPT_ATTEMPTS 10 /* Retry if being set */
42 #define CDS_WFS_WAIT 10 /* Wait 10 ms if being set */
43
44 /*
45 * Stack with wait-free push, blocking traversal.
46 *
47 * Stack implementing push, pop, pop_all operations, as well as iterator
48 * on the stack head returned by pop_all.
49 *
50 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty,
51 * cds_wfs_first.
52 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next,
53 * iteration on stack head returned by pop_all.
54 *
55 * Synchronization table:
56 *
57 * External synchronization techniques described in the API below is
58 * required between pairs marked with "X". No external synchronization
59 * required between pairs marked with "-".
60 *
61 * cds_wfs_push __cds_wfs_pop __cds_wfs_pop_all
62 * cds_wfs_push - - -
63 * __cds_wfs_pop - X X
64 * __cds_wfs_pop_all - X -
65 *
66 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
67 * synchronization.
68 */
69
70 /*
71 * cds_wfs_node_init: initialize wait-free stack node.
72 */
73 static inline
74 void _cds_wfs_node_init(struct cds_wfs_node *node)
75 {
76 node->next = NULL;
77 }
78
79 /*
80 * cds_wfs_init: initialize wait-free stack.
81 */
82 static inline
83 void _cds_wfs_init(struct cds_wfs_stack *s)
84 {
85 int ret;
86
87 s->head = CDS_WFS_END;
88 ret = pthread_mutex_init(&s->lock, NULL);
89 assert(!ret);
90 }
91
92 static inline bool ___cds_wfs_end(void *node)
93 {
94 return node == CDS_WFS_END;
95 }
96
97 /*
98 * cds_wfs_empty: return whether wait-free stack is empty.
99 *
100 * No memory barrier is issued. No mutual exclusion is required.
101 */
102 static inline bool _cds_wfs_empty(struct cds_wfs_stack *s)
103 {
104 return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
105 }
106
107 /*
108 * cds_wfs_push: push a node into the stack.
109 *
110 * Issues a full memory barrier before push. No mutual exclusion is
111 * required.
112 *
113 * Returns 0 if the stack was empty prior to adding the node.
114 * Returns non-zero otherwise.
115 */
116 static inline
117 int _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
118 {
119 struct cds_wfs_head *old_head, *new_head;
120
121 assert(node->next == NULL);
122 new_head = caa_container_of(node, struct cds_wfs_head, node);
123 /*
124 * uatomic_xchg() implicit memory barrier orders earlier stores
125 * to node (setting it to NULL) before publication.
126 */
127 old_head = uatomic_xchg(&s->head, new_head);
128 /*
129 * At this point, dequeuers see a NULL node->next, they should
130 * busy-wait until node->next is set to old_head.
131 */
132 CMM_STORE_SHARED(node->next, &old_head->node);
133 return !___cds_wfs_end(old_head);
134 }
135
136 /*
137 * Waiting for push to complete enqueue and return the next node.
138 */
139 static inline struct cds_wfs_node *
140 ___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
141 {
142 struct cds_wfs_node *next;
143 int attempt = 0;
144
145 /*
146 * Adaptative busy-looping waiting for push to complete.
147 */
148 while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
149 if (!blocking)
150 return CDS_WFS_WOULDBLOCK;
151 if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
152 poll(NULL, 0, CDS_WFS_WAIT); /* Wait for 10ms */
153 attempt = 0;
154 } else {
155 caa_cpu_relax();
156 }
157 }
158
159 return next;
160 }
161
162 static inline
163 struct cds_wfs_node *
164 ___cds_wfs_pop(struct cds_wfs_stack *s, int blocking)
165 {
166 struct cds_wfs_head *head, *new_head;
167 struct cds_wfs_node *next;
168
169 for (;;) {
170 head = CMM_LOAD_SHARED(s->head);
171 if (___cds_wfs_end(head))
172 return NULL;
173 next = ___cds_wfs_node_sync_next(&head->node, blocking);
174 if (!blocking && next == CDS_WFS_WOULDBLOCK)
175 return CDS_WFS_WOULDBLOCK;
176 new_head = caa_container_of(next, struct cds_wfs_head, node);
177 if (uatomic_cmpxchg(&s->head, head, new_head) == head)
178 return &head->node;
179 if (!blocking)
180 return CDS_WFS_WOULDBLOCK;
181 /* busy-loop if head changed under us */
182 }
183 }
184
/*
 * __cds_wfs_pop_blocking: pop a node from the stack.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_wfs_pop_blocking needs to be synchronized using one of the
 * following techniques:
 *
 * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical
 *    section. The caller must wait for a grace period to pass before
 *    freeing the returned node or modifying the cds_wfs_node structure.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
	/* blocking variant: may busy-wait/sleep for a push to finish */
	return ___cds_wfs_pop(s, 1);
}
207
/*
 * __cds_wfs_pop_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
 * it needs to block.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_nonblocking(struct cds_wfs_stack *s)
{
	/* nonblocking variant: never waits on concurrent pushes */
	return ___cds_wfs_pop(s, 0);
}
220
221 /*
222 * __cds_wfs_pop_all: pop all nodes from a stack.
223 *
224 * __cds_wfs_pop_all does not require any synchronization with other
225 * push, nor with other __cds_wfs_pop_all, but requires synchronization
226 * matching the technique used to synchronize __cds_wfs_pop_blocking:
227 *
228 * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical
229 * section, both __cds_wfs_pop_blocking and cds_wfs_pop_all callers
230 * must wait for a grace period to pass before freeing the returned
231 * node or modifying the cds_wfs_node structure. However, no RCU
232 * read-side critical section is needed around __cds_wfs_pop_all.
233 * 2) Using mutual exclusion (e.g. mutexes) to protect
234 * __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
235 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
236 * and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
237 */
238 static inline
239 struct cds_wfs_head *
240 ___cds_wfs_pop_all(struct cds_wfs_stack *s)
241 {
242 struct cds_wfs_head *head;
243
244 /*
245 * Implicit memory barrier after uatomic_xchg() matches implicit
246 * memory barrier before uatomic_xchg() in cds_wfs_push. It
247 * ensures that all nodes of the returned list are consistent.
248 * There is no need to issue memory barriers when iterating on
249 * the returned list, because the full memory barrier issued
250 * prior to each uatomic_cmpxchg, which each write to head, are
251 * taking care to order writes to each node prior to the full
252 * memory barrier after this uatomic_xchg().
253 */
254 head = uatomic_xchg(&s->head, CDS_WFS_END);
255 if (___cds_wfs_end(head))
256 return NULL;
257 return head;
258 }
259
260 /*
261 * cds_wfs_pop_lock: lock stack pop-protection mutex.
262 */
263 static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s)
264 {
265 int ret;
266
267 ret = pthread_mutex_lock(&s->lock);
268 assert(!ret);
269 }
270
271 /*
272 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
273 */
274 static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
275 {
276 int ret;
277
278 ret = pthread_mutex_unlock(&s->lock);
279 assert(!ret);
280 }
281
/*
 * cds_wfs_pop_blocking: call __cds_wfs_pop_blocking with the internal
 * pop mutex held, so callers need no external synchronization.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
	struct cds_wfs_node *node;

	_cds_wfs_pop_lock(s);
	node = ___cds_wfs_pop_blocking(s);
	_cds_wfs_pop_unlock(s);

	return node;
}
296
/*
 * cds_wfs_pop_all_blocking: call __cds_wfs_pop_all with the internal
 * pop mutex held, so callers need no external synchronization.
 */
static inline
struct cds_wfs_head *
_cds_wfs_pop_all_blocking(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *head;

	_cds_wfs_pop_lock(s);
	head = ___cds_wfs_pop_all(s);
	_cds_wfs_pop_unlock(s);

	return head;
}
311
312 /*
313 * cds_wfs_first: get first node of a popped stack.
314 *
315 * Content written into the node before enqueue is guaranteed to be
316 * consistent, but no other memory ordering is ensured.
317 *
318 * Used by for-like iteration macros in urcu/wfstack.h:
319 * cds_wfs_for_each_blocking()
320 * cds_wfs_for_each_blocking_safe()
321 *
322 * Returns NULL if popped stack is empty, top stack node otherwise.
323 */
324 static inline struct cds_wfs_node *
325 _cds_wfs_first(struct cds_wfs_head *head)
326 {
327 if (___cds_wfs_end(head))
328 return NULL;
329 return &head->node;
330 }
331
/*
 * ___cds_wfs_next: common helper returning the successor of @node, or
 * NULL at end of list. With @blocking == 0 it may return
 * CDS_WFS_WOULDBLOCK instead of waiting for a push to complete.
 */
static inline struct cds_wfs_node *
___cds_wfs_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;

	next = ___cds_wfs_node_sync_next(node, blocking);
	/*
	 * CDS_WFS_WOULDBLOCK != CDS_WFS_END, so we can check for end
	 * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
	 * and still return CDS_WFS_WOULDBLOCK.
	 */
	if (___cds_wfs_end(next))
		return NULL;
	return next;
}
347
/*
 * cds_wfs_next_blocking: get next node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if reached end of popped stack, non-NULL next stack
 * node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
	/* blocking variant: may wait for a concurrent push to finish */
	return ___cds_wfs_next(node, 1);
}
366
/*
 * cds_wfs_next_nonblocking: get next node of a popped stack.
 *
 * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
 * needs to block.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
	/* nonblocking variant: never waits on concurrent pushes */
	return ___cds_wfs_next(node, 0);
}
379
380 #ifdef __cplusplus
381 }
382 #endif
383
384 #endif /* _URCU_STATIC_WFSTACK_H */
This page took 0.037511 seconds and 5 git commands to generate.