#ifndef _URCU_STATIC_WFSTACK_H
#define _URCU_STATIC_WFSTACK_H

/*
 * urcu/static/wfstack.h
 *
 * Userspace RCU library - Stack with wait-free push, blocking traversal.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
 * linking dynamically with the userspace rcu library.
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <assert.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

#define CDS_WFS_END			((void *) 0x1UL)
#define CDS_WFS_ADAPT_ATTEMPTS		10	/* Retry if being set */
#define CDS_WFS_WAIT			10	/* Wait 10 ms if being set */

/*
 * Stack with wait-free push, blocking traversal.
 *
 * Stack implementing push, pop, pop_all operations, as well as an
 * iterator on the stack head returned by pop_all.
 *
 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty,
 * cds_wfs_first.
 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next,
 * iteration on stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 *                      cds_wfs_push  __cds_wfs_pop  __cds_wfs_pop_all
 * cds_wfs_push               -              -                  -
 * __cds_wfs_pop              -              X                  X
 * __cds_wfs_pop_all          -              X                  -
 *
 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
 * synchronization.
 */
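
/*
 * Usage sketch (illustrative only): a stack node is meant to be
 * embedded in a user-defined structure, and the enclosing structure is
 * recovered with caa_container_of(). The struct my_item type and the
 * node_to_item() helper below are hypothetical and are reused by the
 * sketches further down:
 *
 *	struct my_item {
 *		int value;
 *		struct cds_wfs_node node;
 *	};
 *
 *	static struct my_item *node_to_item(struct cds_wfs_node *node)
 *	{
 *		return caa_container_of(node, struct my_item, node);
 *	}
 */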

/*
 * cds_wfs_node_init: initialize wait-free stack node.
 */
static inline
void _cds_wfs_node_init(struct cds_wfs_node *node)
{
	node->next = NULL;
}

/*
 * cds_wfs_init: initialize wait-free stack.
 */
static inline
void _cds_wfs_init(struct cds_wfs_stack *s)
{
	int ret;

	s->head = CDS_WFS_END;
	ret = pthread_mutex_init(&s->lock, NULL);
	assert(!ret);
}
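
/*
 * Initialization sketch (illustrative only; struct my_item is the
 * hypothetical user type embedding a cds_wfs_node shown earlier):
 *
 *	struct cds_wfs_stack s;
 *	struct my_item item;
 *
 *	_cds_wfs_init(&s);
 *	_cds_wfs_node_init(&item.node);
 */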

static inline bool ___cds_wfs_end(void *node)
{
	return node == CDS_WFS_END;
}

/*
 * cds_wfs_empty: return whether wait-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline bool _cds_wfs_empty(struct cds_wfs_stack *s)
{
	return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
}

/*
 * cds_wfs_push: push a node into the stack.
 *
 * Issues a full memory barrier before push. No mutual exclusion is
 * required.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
int _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
{
	struct cds_wfs_head *old_head, *new_head;

	assert(node->next == NULL);
	new_head = caa_container_of(node, struct cds_wfs_head, node);
	/*
	 * uatomic_xchg() implicit memory barrier orders earlier stores
	 * to node (setting it to NULL) before publication.
	 */
	old_head = uatomic_xchg(&s->head, new_head);
	/*
	 * At this point, dequeuers see a NULL node->next; they should
	 * busy-wait until node->next is set to old_head.
	 */
	CMM_STORE_SHARED(node->next, &old_head->node);
	return !___cds_wfs_end(old_head);
}
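
/*
 * Push sketch (illustrative only): per the synchronization table above,
 * concurrent pushers need no locking among themselves. The return
 * value exposes the empty -> non-empty transition, which can be used,
 * for example, to wake up a sleeping consumer (wake_consumer() is a
 * hypothetical helper):
 *
 *	_cds_wfs_node_init(&item.node);
 *	if (!_cds_wfs_push(&s, &item.node))
 *		wake_consumer();
 */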

/*
 * Wait for a push to complete its enqueue, then return the next node.
 */
static inline struct cds_wfs_node *
___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping, waiting for the push to complete.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (!blocking)
			return CDS_WFS_WOULDBLOCK;
		if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
			poll(NULL, 0, CDS_WFS_WAIT);	/* Wait for 10ms */
			attempt = 0;
		} else {
			caa_cpu_relax();
		}
	}

	return next;
}

static inline
struct cds_wfs_node *
___cds_wfs_pop(struct cds_wfs_stack *s, int *state, int blocking)
{
	struct cds_wfs_head *head, *new_head;
	struct cds_wfs_node *next;

	if (state)
		*state = 0;
	for (;;) {
		head = CMM_LOAD_SHARED(s->head);
		if (___cds_wfs_end(head)) {
			return NULL;
		}
		next = ___cds_wfs_node_sync_next(&head->node, blocking);
		if (!blocking && next == CDS_WFS_WOULDBLOCK) {
			return CDS_WFS_WOULDBLOCK;
		}
		new_head = caa_container_of(next, struct cds_wfs_head, node);
		if (uatomic_cmpxchg(&s->head, head, new_head) == head) {
			if (state && ___cds_wfs_end(new_head))
				*state |= CDS_WFS_STATE_LAST;
			return &head->node;
		}
		if (!blocking) {
			return CDS_WFS_WOULDBLOCK;
		}
		/* busy-loop if head changed under us */
	}
}

/*
 * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_wfs_pop_blocking needs to be synchronized using one of the
 * following techniques:
 *
 * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical
 *    section. The caller must wait for a grace period to pass before
 *    freeing the returned node or modifying the cds_wfs_node structure.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all() (multi-provider/single-consumer scheme).
 *
 * "state" saves state flags atomically sampled with the pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
	return ___cds_wfs_pop(s, state, 1);
}

static inline
struct cds_wfs_node *
___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
	return ___cds_wfs_pop_with_state_blocking(s, NULL);
}
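
/*
 * Blocking pop sketch using technique (1) above (illustrative only;
 * assumes a urcu flavor providing rcu_read_lock(), rcu_read_unlock()
 * and synchronize_rcu() is in use, and that the item was heap
 * allocated). A grace period must elapse before the popped node is
 * freed:
 *
 *	struct cds_wfs_node *node;
 *
 *	rcu_read_lock();
 *	node = ___cds_wfs_pop_blocking(&s);
 *	rcu_read_unlock();
 *	if (node) {
 *		struct my_item *item = node_to_item(node);
 *
 *		... use item ...
 *
 *		synchronize_rcu();
 *		free(item);
 *	}
 */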

/*
 * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_with_state_blocking, but returns
 * CDS_WFS_WOULDBLOCK if it needs to block.
 *
 * "state" saves state flags atomically sampled with the pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_nonblocking(struct cds_wfs_stack *s, int *state)
{
	return ___cds_wfs_pop(s, state, 0);
}

/*
 * __cds_wfs_pop_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
 * it needs to block.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_nonblocking(struct cds_wfs_stack *s)
{
	return ___cds_wfs_pop_with_state_nonblocking(s, NULL);
}
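
/*
 * Non-blocking pop sketch (illustrative only): CDS_WFS_WOULDBLOCK is a
 * distinct sentinel value (declared with the cds_wfs types, outside
 * this file), so all three outcomes can be told apart. The same
 * synchronization requirements as the blocking variant apply:
 *
 *	struct cds_wfs_node *node;
 *
 *	node = ___cds_wfs_pop_nonblocking(&s);
 *	if (node == CDS_WFS_WOULDBLOCK) {
 *		... a concurrent update is in progress, retry later ...
 *	} else if (node == NULL) {
 *		... the stack was empty ...
 *	} else {
 *		... node was popped, handle it as in the blocking case ...
 *	}
 */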

/*
 * __cds_wfs_pop_all: pop all nodes from a stack.
 *
 * __cds_wfs_pop_all does not require any synchronization with other
 * push operations, nor with other __cds_wfs_pop_all calls, but it does
 * require synchronization matching the technique used to synchronize
 * __cds_wfs_pop_blocking:
 *
 * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical
 *    section, both __cds_wfs_pop_blocking and cds_wfs_pop_all callers
 *    must wait for a grace period to pass before freeing the returned
 *    node or modifying the cds_wfs_node structure. However, no RCU
 *    read-side critical section is needed around __cds_wfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all() (multi-provider/single-consumer scheme).
 */
static inline
struct cds_wfs_head *
___cds_wfs_pop_all(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *head;

	/*
	 * The implicit memory barrier after uatomic_xchg() matches the
	 * implicit memory barrier before uatomic_xchg() in cds_wfs_push.
	 * It ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barrier issued
	 * prior to each uatomic_cmpxchg (each of which writes to head)
	 * orders the writes to each node before the full memory barrier
	 * following this uatomic_xchg().
	 */
	head = uatomic_xchg(&s->head, CDS_WFS_END);
	if (___cds_wfs_end(head))
		return NULL;
	return head;
}

/*
 * cds_wfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	assert(!ret);
}

/*
 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	assert(!ret);
}

/*
 * Call __cds_wfs_pop_with_state_blocking with an internal pop mutex held.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
	struct cds_wfs_node *retnode;

	_cds_wfs_pop_lock(s);
	retnode = ___cds_wfs_pop_with_state_blocking(s, state);
	_cds_wfs_pop_unlock(s);
	return retnode;
}

/*
 * Call _cds_wfs_pop_with_state_blocking without saving any state.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
	return _cds_wfs_pop_with_state_blocking(s, NULL);
}

/*
 * Call __cds_wfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_wfs_head *
_cds_wfs_pop_all_blocking(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *rethead;

	_cds_wfs_pop_lock(s);
	rethead = ___cds_wfs_pop_all(s);
	_cds_wfs_pop_unlock(s);
	return rethead;
}
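
/*
 * Consumer sketch using the mutex-protected entry points (illustrative
 * only): since _cds_wfs_pop_blocking() and _cds_wfs_pop_all_blocking()
 * serialize on the internal pop mutex, their callers need no external
 * synchronization among themselves:
 *
 *	struct cds_wfs_node *node;
 *
 *	while ((node = _cds_wfs_pop_blocking(&s)) != NULL) {
 *		struct my_item *item = node_to_item(node);
 *
 *		... process item ...
 *	}
 */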

/*
 * cds_wfs_first: get first node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if popped stack is empty, top stack node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_first(struct cds_wfs_head *head)
{
	if (___cds_wfs_end(head))
		return NULL;
	return &head->node;
}

static inline struct cds_wfs_node *
___cds_wfs_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;

	next = ___cds_wfs_node_sync_next(node, blocking);
	/*
	 * CDS_WFS_WOULDBLOCK != CDS_WFS_END, so we can check for end
	 * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
	 * and still return CDS_WFS_WOULDBLOCK.
	 */
	if (___cds_wfs_end(next))
		return NULL;
	return next;
}

/*
 * cds_wfs_next_blocking: get next node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if reached end of popped stack, non-NULL next stack
 * node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 1);
}
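
/*
 * Traversal sketch (illustrative only): pop the whole stack at once,
 * then walk the returned head. The current node must not be freed
 * before advancing, since _cds_wfs_next_blocking() reads node->next;
 * the cds_wfs_for_each_blocking_safe() macro in urcu/wfstack.h is
 * intended for that case:
 *
 *	struct cds_wfs_head *head;
 *	struct cds_wfs_node *iter;
 *
 *	head = _cds_wfs_pop_all_blocking(&s);
 *	if (head != NULL) {
 *		for (iter = _cds_wfs_first(head); iter != NULL;
 *				iter = _cds_wfs_next_blocking(iter)) {
 *			struct my_item *item = node_to_item(iter);
 *
 *			... read item ...
 *		}
 *	}
 */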

/*
 * cds_wfs_next_nonblocking: get next node of a popped stack.
 *
 * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
 * needs to block.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 0);
}

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_WFSTACK_H */