#ifndef _URCU_STATIC_WFSTACK_H
#define _URCU_STATIC_WFSTACK_H

/*
 * urcu/static/wfstack.h
 *
 * Userspace RCU library - Stack with wait-free push, blocking traversal.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
 * linking dynamically with the userspace rcu library.
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/assert.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

#define CDS_WFS_END			((void *) 0x1UL)
#define CDS_WFS_ADAPT_ATTEMPTS		10	/* Retry if being set */
#define CDS_WFS_WAIT			10	/* Wait 10 ms if being set */

/*
 * Stack with wait-free push, blocking traversal.
 *
 * Stack implementing push, pop, pop_all operations, as well as iterator
 * on the stack head returned by pop_all.
 *
 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty,
 * cds_wfs_first.
 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next,
 * iteration on stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 *                      cds_wfs_push  __cds_wfs_pop  __cds_wfs_pop_all
 * cds_wfs_push               -             -                -
 * __cds_wfs_pop              -             X                X
 * __cds_wfs_pop_all          -             X                -
 *
 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
 * synchronization.
 */

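/*
 * Usage sketch (illustrative only, not part of the upstream
 * documentation): minimal use of the public wrappers declared in
 * urcu/wfstack.h. Pushers need no locking; cds_wfs_pop_blocking()
 * serializes concurrent poppers with the stack's internal mutex.
 * struct myitem is a hypothetical user structure embedding a node;
 * error handling is omitted.
 *
 *    struct myitem {
 *            struct cds_wfs_node node;
 *            int value;
 *    };
 *
 *    struct cds_wfs_stack stack;
 *    struct myitem *item;
 *    struct cds_wfs_node *snode;
 *
 *    cds_wfs_init(&stack);
 *
 *    // Producer side (any thread, wait-free, no locking needed):
 *    item = malloc(sizeof(*item));
 *    cds_wfs_node_init(&item->node);
 *    item->value = 42;
 *    (void) cds_wfs_push(&stack, &item->node);
 *
 *    // Consumer side (internal mutex handles concurrent poppers):
 *    snode = cds_wfs_pop_blocking(&stack);
 *    if (snode)
 *            free(caa_container_of(snode, struct myitem, node));
 *
 *    cds_wfs_destroy(&stack);
 */
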
/*
 * cds_wfs_node_init: initialize wait-free stack node.
 */
static inline
void _cds_wfs_node_init(struct cds_wfs_node *node)
{
	node->next = NULL;
}

/*
 * __cds_wfs_init: initialize wait-free stack. Don't pair with
 * any destroy function.
 */
static inline void ___cds_wfs_init(struct __cds_wfs_stack *s)
{
	s->head = CDS_WFS_END;
}

/*
 * cds_wfs_init: initialize wait-free stack. Pair with
 * cds_wfs_destroy().
 */
static inline
void _cds_wfs_init(struct cds_wfs_stack *s)
{
	int ret;

	s->head = CDS_WFS_END;
	ret = pthread_mutex_init(&s->lock, NULL);
	urcu_posix_assert(!ret);
}

/*
 * cds_wfs_destroy: destroy wait-free stack. Pair with
 * cds_wfs_init().
 */
static inline
void _cds_wfs_destroy(struct cds_wfs_stack *s)
{
	int ret = pthread_mutex_destroy(&s->lock);
	urcu_posix_assert(!ret);
}

static inline bool ___cds_wfs_end(void *node)
{
	return node == CDS_WFS_END;
}

/*
 * cds_wfs_empty: return whether wait-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
{
	struct __cds_wfs_stack *s = u_stack._s;

	return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
}

/*
 * cds_wfs_push: push a node into the stack.
 *
 * Issues a full memory barrier before push. No mutual exclusion is
 * required.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
int _cds_wfs_push(cds_wfs_stack_ptr_t u_stack, struct cds_wfs_node *node)
{
	struct __cds_wfs_stack *s = u_stack._s;
	struct cds_wfs_head *old_head, *new_head;

	urcu_posix_assert(node->next == NULL);
	new_head = caa_container_of(node, struct cds_wfs_head, node);
	/*
	 * uatomic_xchg() implicit memory barrier orders earlier stores
	 * to node (setting it to NULL) before publication.
	 */
	old_head = uatomic_xchg(&s->head, new_head);
	/*
	 * At this point, dequeuers see a NULL node->next, they should
	 * busy-wait until node->next is set to old_head.
	 */
	CMM_STORE_SHARED(node->next, &old_head->node);
	return !___cds_wfs_end(old_head);
}

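/*
 * Sketch of how the push return value can be used (illustrative only,
 * not part of the upstream documentation): cds_wfs_push returns 0 only
 * when the stack was empty before the node was added, so a producer
 * can limit consumer wakeups to the empty -> non-empty transition. The
 * wakeup_lock/wakeup_cond pair below is a hypothetical companion to
 * the stack, not part of the wfstack API.
 *
 *    extern pthread_mutex_t wakeup_lock;
 *    extern pthread_cond_t wakeup_cond;
 *
 *    static void produce(struct cds_wfs_stack *stack,
 *                    struct cds_wfs_node *node)
 *    {
 *            cds_wfs_node_init(node);
 *            if (cds_wfs_push(stack, node) == 0) {
 *                    // Stack was empty: wake a sleeping consumer.
 *                    pthread_mutex_lock(&wakeup_lock);
 *                    pthread_cond_signal(&wakeup_cond);
 *                    pthread_mutex_unlock(&wakeup_lock);
 *            }
 *    }
 */
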
/*
 * Wait for the push to complete the enqueue, and return the next node.
 */
static inline struct cds_wfs_node *
___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping waiting for push to complete.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (!blocking)
			return CDS_WFS_WOULDBLOCK;
		if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
			(void) poll(NULL, 0, CDS_WFS_WAIT);	/* Wait for 10ms */
			attempt = 0;
		} else {
			caa_cpu_relax();
		}
	}

	return next;
}

static inline
struct cds_wfs_node *
___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack, int *state, int blocking)
{
	struct cds_wfs_head *head, *new_head;
	struct cds_wfs_node *next;
	struct __cds_wfs_stack *s = u_stack._s;

	if (state)
		*state = 0;
	for (;;) {
		head = CMM_LOAD_SHARED(s->head);
		if (___cds_wfs_end(head)) {
			return NULL;
		}
		next = ___cds_wfs_node_sync_next(&head->node, blocking);
		if (!blocking && next == CDS_WFS_WOULDBLOCK) {
			return CDS_WFS_WOULDBLOCK;
		}
		new_head = caa_container_of(next, struct cds_wfs_head, node);
		if (uatomic_cmpxchg(&s->head, head, new_head) == head) {
			if (state && ___cds_wfs_end(new_head))
				*state |= CDS_WFS_STATE_LAST;
			return &head->node;
		}
		if (!blocking) {
			return CDS_WFS_WOULDBLOCK;
		}
		/* busy-loop if head changed under us */
	}
}

/*
 * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_wfs_pop_blocking needs to be synchronized using one of the
 * following techniques:
 *
 * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical
 *    section. The caller must wait for a grace period to pass before
 *    freeing the returned node or modifying the cds_wfs_node structure.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_blocking(cds_wfs_stack_ptr_t u_stack, int *state)
{
	return ___cds_wfs_pop(u_stack, state, 1);
}

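/*
 * Sketch of synchronization technique 1) above (illustrative only, not
 * part of the upstream documentation): pop under an RCU read-side
 * critical section, then wait for a grace period before freeing the
 * node. Assumes a liburcu flavor (e.g. urcu-memb) is linked in, and
 * that "stack" and struct myitem are hypothetical caller-provided
 * definitions.
 *
 *    struct cds_wfs_node *snode;
 *
 *    rcu_read_lock();
 *    snode = __cds_wfs_pop_blocking(&stack);
 *    rcu_read_unlock();
 *    if (snode) {
 *            synchronize_rcu();  // grace period before reuse/free
 *            free(caa_container_of(snode, struct myitem, node));
 *    }
 */
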
static inline
struct cds_wfs_node *
___cds_wfs_pop_blocking(cds_wfs_stack_ptr_t u_stack)
{
	return ___cds_wfs_pop_with_state_blocking(u_stack, NULL);
}

/*
 * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_with_state_blocking, but returns
 * CDS_WFS_WOULDBLOCK if it needs to block.
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_nonblocking(cds_wfs_stack_ptr_t u_stack, int *state)
{
	return ___cds_wfs_pop(u_stack, state, 0);
}

/*
 * __cds_wfs_pop_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
 * it needs to block.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_nonblocking(cds_wfs_stack_ptr_t u_stack)
{
	return ___cds_wfs_pop_with_state_nonblocking(u_stack, NULL);
}

/*
 * __cds_wfs_pop_all: pop all nodes from a stack.
 *
 * __cds_wfs_pop_all does not require any synchronization with other
 * push, nor with other __cds_wfs_pop_all, but requires synchronization
 * matching the technique used to synchronize __cds_wfs_pop_blocking:
 *
 * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical
 *    section, both __cds_wfs_pop_blocking and cds_wfs_pop_all callers
 *    must wait for a grace period to pass before freeing the returned
 *    node or modifying the cds_wfs_node structure. However, no RCU
 *    read-side critical section is needed around __cds_wfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_wfs_head *
___cds_wfs_pop_all(cds_wfs_stack_ptr_t u_stack)
{
	struct __cds_wfs_stack *s = u_stack._s;
	struct cds_wfs_head *head;

	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_xchg() in cds_wfs_push. It
	 * ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barriers issued
	 * prior to each uatomic_cmpxchg, which each write to head, take
	 * care to order writes to each node before the full memory
	 * barrier that follows this uatomic_xchg().
	 */
	head = uatomic_xchg(&s->head, CDS_WFS_END);
	if (___cds_wfs_end(head))
		return NULL;
	return head;
}

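/*
 * Usage sketch (illustrative only, not part of the upstream
 * documentation): drain the whole stack in one operation with the
 * mutex-protected cds_wfs_pop_all_blocking() wrapper, then iterate
 * over the returned head with cds_wfs_for_each_blocking_safe() from
 * urcu/wfstack.h. struct myitem is a hypothetical user structure
 * embedding a cds_wfs_node.
 *
 *    struct cds_wfs_head *head;
 *    struct cds_wfs_node *node, *n;
 *
 *    head = cds_wfs_pop_all_blocking(&stack);
 *    cds_wfs_for_each_blocking_safe(head, node, n)
 *            free(caa_container_of(node, struct myitem, node));
 */
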
/*
 * cds_wfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * Call __cds_wfs_pop_with_state_blocking with an internal pop mutex held.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
	struct cds_wfs_node *retnode;

	_cds_wfs_pop_lock(s);
	retnode = ___cds_wfs_pop_with_state_blocking(s, state);
	_cds_wfs_pop_unlock(s);
	return retnode;
}

/*
 * Call _cds_wfs_pop_with_state_blocking without saving any state.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
	return _cds_wfs_pop_with_state_blocking(s, NULL);
}

/*
 * Call __cds_wfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_wfs_head *
_cds_wfs_pop_all_blocking(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *rethead;

	_cds_wfs_pop_lock(s);
	rethead = ___cds_wfs_pop_all(s);
	_cds_wfs_pop_unlock(s);
	return rethead;
}

/*
 * cds_wfs_first: get first node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if popped stack is empty, top stack node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_first(struct cds_wfs_head *head)
{
	if (___cds_wfs_end(head))
		return NULL;
	return &head->node;
}

static inline struct cds_wfs_node *
___cds_wfs_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;

	next = ___cds_wfs_node_sync_next(node, blocking);
	/*
	 * CDS_WFS_WOULDBLOCK != CDS_WFS_END, so we can check for end
	 * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
	 * and still return CDS_WFS_WOULDBLOCK.
	 */
	if (___cds_wfs_end(next))
		return NULL;
	return next;
}

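/*
 * Usage sketch (illustrative only, not part of the upstream
 * documentation): manual traversal of a popped stack with the public
 * cds_wfs_first() and cds_wfs_next_blocking() wrappers, equivalent to
 * what cds_wfs_for_each_blocking() expands to. "head" is assumed to
 * come from a prior cds_wfs_pop_all_blocking() call.
 *
 *    struct cds_wfs_node *node;
 *
 *    for (node = cds_wfs_first(head);
 *                    node != NULL;
 *                    node = cds_wfs_next_blocking(node)) {
 *            // Read-only visit. Do not free "node" here: the next
 *            // iteration dereferences node->next. Use the
 *            // cds_wfs_for_each_blocking_safe() variant to free
 *            // while iterating.
 *    }
 */
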
/*
 * cds_wfs_next_blocking: get next node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if reached end of popped stack, non-NULL next stack
 * node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 1);
}

/*
 * cds_wfs_next_nonblocking: get next node of a popped stack.
 *
 * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
 * needs to block.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 0);
}

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_WFSTACK_H */