Commit | Line | Data |
---|---|---|
d3d3857f MJ |
1 | // SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
2 | // | |
3 | // SPDX-License-Identifier: LGPL-2.1-or-later | |
4 | ||
edac6b69 MD |
5 | #ifndef _URCU_STATIC_WFSTACK_H |
6 | #define _URCU_STATIC_WFSTACK_H | |
294d3396 MD |
7 | |
8 | /* | |
edac6b69 | 9 | * Userspace RCU library - Stack with wait-free push, blocking traversal. |
294d3396 | 10 | * |
07c2a4fd MD |
11 | * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for |
12 | * linking dynamically with the userspace rcu library. | |
294d3396 MD |
13 | */ |
14 | ||
15 | #include <pthread.h> | |
b57aee66 | 16 | #include <poll.h> |
edac6b69 | 17 | #include <stdbool.h> |
01477510 | 18 | #include <urcu/assert.h> |
294d3396 | 19 | #include <urcu/compiler.h> |
a2e7bf9c | 20 | #include <urcu/uatomic.h> |
294d3396 MD |
21 | |
22 | #ifdef __cplusplus | |
23 | extern "C" { | |
24 | #endif | |
25 | ||
28757437 | 26 | #define CDS_WFS_END ((struct cds_wfs_head *) 0x1UL) |
16aa9ee8 DG |
27 | #define CDS_WFS_ADAPT_ATTEMPTS 10 /* Retry if being set */ |
28 | #define CDS_WFS_WAIT 10 /* Wait 10 ms if being set */ | |
294d3396 | 29 | |
edac6b69 MD |
30 | /* |
31 | * Stack with wait-free push, blocking traversal. | |
32 | * | |
33 | * Stack implementing push, pop, pop_all operations, as well as iterator | |
34 | * on the stack head returned by pop_all. | |
35 | * | |
c97c6ce5 MD |
36 | * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty, |
37 | * cds_wfs_first. | |
38 | * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next, | |
39 | * iteration on stack head returned by pop_all. | |
edac6b69 MD |
40 | * |
41 | * Synchronization table: | |
42 | * | |
43 | * External synchronization techniques described in the API below is | |
44 | * required between pairs marked with "X". No external synchronization | |
45 | * required between pairs marked with "-". | |
46 | * | |
47 | * cds_wfs_push __cds_wfs_pop __cds_wfs_pop_all | |
48 | * cds_wfs_push - - - | |
49 | * __cds_wfs_pop - X X | |
50 | * __cds_wfs_pop_all - X - | |
51 | * | |
52 | * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide | |
53 | * synchronization. | |
54 | */ | |
55 | ||
56 | /* | |
57 | * cds_wfs_node_init: initialize wait-free stack node. | |
58 | */ | |
756a0322 | 59 | static inline |
16aa9ee8 | 60 | void _cds_wfs_node_init(struct cds_wfs_node *node) |
294d3396 MD |
61 | { |
62 | node->next = NULL; | |
63 | } | |
64 | ||
718eb63e | 65 | /* |
200d100e MD |
66 | * __cds_wfs_init: initialize wait-free stack. Don't pair with |
67 | * any destroy function. | |
718eb63e EW |
68 | */ |
69 | static inline void ___cds_wfs_init(struct __cds_wfs_stack *s) | |
70 | { | |
71 | s->head = CDS_WFS_END; | |
72 | } | |
73 | ||
edac6b69 | 74 | /* |
200d100e MD |
75 | * cds_wfs_init: initialize wait-free stack. Pair with |
76 | * cds_wfs_destroy(). | |
edac6b69 | 77 | */ |
756a0322 | 78 | static inline |
16aa9ee8 | 79 | void _cds_wfs_init(struct cds_wfs_stack *s) |
294d3396 MD |
80 | { |
81 | int ret; | |
82 | ||
edac6b69 | 83 | s->head = CDS_WFS_END; |
294d3396 | 84 | ret = pthread_mutex_init(&s->lock, NULL); |
01477510 | 85 | urcu_posix_assert(!ret); |
294d3396 MD |
86 | } |
87 | ||
200d100e MD |
88 | /* |
89 | * cds_wfs_destroy: destroy wait-free stack. Pair with | |
90 | * cds_wfs_init(). | |
91 | */ | |
92 | static inline | |
93 | void _cds_wfs_destroy(struct cds_wfs_stack *s) | |
94 | { | |
95 | int ret = pthread_mutex_destroy(&s->lock); | |
01477510 | 96 | urcu_posix_assert(!ret); |
200d100e MD |
97 | } |
98 | ||
edac6b69 MD |
99 | static inline bool ___cds_wfs_end(void *node) |
100 | { | |
101 | return node == CDS_WFS_END; | |
102 | } | |
103 | ||
191098fc | 104 | /* |
edac6b69 MD |
105 | * cds_wfs_empty: return whether wait-free stack is empty. |
106 | * | |
107 | * No memory barrier is issued. No mutual exclusion is required. | |
108 | */ | |
718eb63e | 109 | static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack) |
edac6b69 | 110 | { |
718eb63e EW |
111 | struct __cds_wfs_stack *s = u_stack._s; |
112 | ||
0e2125fb | 113 | return ___cds_wfs_end(uatomic_load(&s->head, CMM_RELAXED)); |
edac6b69 MD |
114 | } |
115 | ||
116 | /* | |
117 | * cds_wfs_push: push a node into the stack. | |
118 | * | |
119 | * Issues a full memory barrier before push. No mutual exclusion is | |
120 | * required. | |
121 | * | |
0e2125fb OD |
122 | * Operations before push are consistent when observed after associated pop. |
123 | * | |
edac6b69 MD |
124 | * Returns 0 if the stack was empty prior to adding the node. |
125 | * Returns non-zero otherwise. | |
191098fc | 126 | */ |
/*
 * Push @node onto the stack @u_stack. Wait-free; no mutual exclusion
 * required. The node must have been prepared with cds_wfs_node_init()
 * (next == NULL), which the assert below checks.
 *
 * Returns 0 if the stack was empty prior to adding the node, non-zero
 * otherwise.
 */
static inline
int _cds_wfs_push(cds_wfs_stack_ptr_t u_stack, struct cds_wfs_node *node)
{
	struct __cds_wfs_stack *s = u_stack._s;
	struct cds_wfs_head *old_head, *new_head;

	urcu_posix_assert(node->next == NULL);
	new_head = caa_container_of(node, struct cds_wfs_head, node);
	/*
	 * uatomic_xchg() implicit memory barrier orders earlier stores
	 * to node (setting it to NULL) before publication.
	 */
	cmm_emit_legacy_smp_mb();
	old_head = uatomic_xchg_mo(&s->head, new_head, CMM_SEQ_CST);
	/*
	 * At this point, dequeuers see a NULL node->next, they should
	 * busy-wait until node->next is set to old_head.
	 * The CMM_RELEASE store completes the link; it pairs with the
	 * CMM_CONSUME load in ___cds_wfs_node_sync_next().
	 */
	uatomic_store(&node->next, &old_head->node, CMM_RELEASE);
	return !___cds_wfs_end(old_head);
}
148 | ||
149 | /* | |
edac6b69 | 150 | * Waiting for push to complete enqueue and return the next node. |
294d3396 | 151 | */ |
/*
 * Waiting for push to complete enqueue and return the next node.
 *
 * A pusher publishes the new head before linking node->next (see
 * _cds_wfs_push), so traversal can briefly observe next == NULL.
 * If @blocking is false, return CDS_WFS_WOULDBLOCK instead of waiting.
 */
static inline struct cds_wfs_node *
___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;
	int attempt = 0;

	/*
	 * Adaptative busy-looping waiting for push to complete: spin
	 * (cpu relax) for CDS_WFS_ADAPT_ATTEMPTS tries, then sleep.
	 */
	while ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
		if (!blocking)
			return CDS_WFS_WOULDBLOCK;
		if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
			(void) poll(NULL, 0, CDS_WFS_WAIT);	/* Wait for 10ms */
			attempt = 0;
		} else {
			caa_cpu_relax();
		}
	}

	return next;
}
174 | ||
af67624d MD |
/*
 * Common pop implementation shared by the blocking and non-blocking
 * entry points.
 *
 * If @state is non-NULL, *state is set to CDS_WFS_STATE_LAST when the
 * popped node was the last node on the stack, 0 otherwise; the flag is
 * sampled atomically with the pop itself (at the cmpxchg).
 *
 * Returns NULL if the stack is empty, CDS_WFS_WOULDBLOCK if @blocking
 * is false and progress would require waiting, otherwise the popped
 * node.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack, int *state, int blocking)
{
	struct cds_wfs_head *head, *new_head;
	struct cds_wfs_node *next;
	struct __cds_wfs_stack *s = u_stack._s;

	if (state)
		*state = 0;
	for (;;) {
		head = uatomic_load(&s->head, CMM_CONSUME);
		if (___cds_wfs_end(head)) {
			/* Empty stack. */
			return NULL;
		}
		/* May busy-wait for a concurrent pusher to link next. */
		next = ___cds_wfs_node_sync_next(&head->node, blocking);
		if (!blocking && next == CDS_WFS_WOULDBLOCK) {
			return CDS_WFS_WOULDBLOCK;
		}
		new_head = caa_container_of(next, struct cds_wfs_head, node);
		if (uatomic_cmpxchg_mo(&s->head, head, new_head,
				CMM_SEQ_CST, CMM_SEQ_CST) == head) {
			if (state && ___cds_wfs_end(new_head))
				*state |= CDS_WFS_STATE_LAST;
			cmm_emit_legacy_smp_mb();
			return &head->node;
		}
		if (!blocking) {
			/* Lost the race for head: caller must retry. */
			return CDS_WFS_WOULDBLOCK;
		}
		/* busy-loop if head changed under us */
	}
}
208 | ||
edac6b69 | 209 | /* |
c8975b94 | 210 | * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state. |
edac6b69 MD |
211 | * |
212 | * Returns NULL if stack is empty. | |
213 | * | |
0e2125fb OD |
214 | * Operations after pop are consistent when observed before associated push. |
215 | * | |
edac6b69 MD |
216 | * __cds_wfs_pop_blocking needs to be synchronized using one of the |
217 | * following techniques: | |
218 | * | |
219 | * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical | |
220 | * section. The caller must wait for a grace period to pass before | |
221 | * freeing the returned node or modifying the cds_wfs_node structure. | |
222 | * 2) Using mutual exclusion (e.g. mutexes) to protect | |
223 | * __cds_wfs_pop_blocking and __cds_wfs_pop_all callers. | |
224 | * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking() | |
225 | * and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme). | |
c8975b94 MD |
226 | * |
227 | * "state" saves state flags atomically sampled with pop operation. | |
edac6b69 | 228 | */ |
c8975b94 MD |
229 | static inline |
230 | struct cds_wfs_node * | |
711ff0f9 | 231 | ___cds_wfs_pop_with_state_blocking(cds_wfs_stack_ptr_t u_stack, int *state) |
c8975b94 | 232 | { |
711ff0f9 | 233 | return ___cds_wfs_pop(u_stack, state, 1); |
c8975b94 MD |
234 | } |
235 | ||
756a0322 | 236 | static inline |
16aa9ee8 | 237 | struct cds_wfs_node * |
711ff0f9 | 238 | ___cds_wfs_pop_blocking(cds_wfs_stack_ptr_t u_stack) |
edac6b69 | 239 | { |
711ff0f9 | 240 | return ___cds_wfs_pop_with_state_blocking(u_stack, NULL); |
c8975b94 MD |
241 | } |
242 | ||
243 | /* | |
244 | * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack. | |
245 | * | |
246 | * Same as __cds_wfs_pop_with_state_blocking, but returns | |
247 | * CDS_WFS_WOULDBLOCK if it needs to block. | |
248 | * | |
249 | * "state" saves state flags atomically sampled with pop operation. | |
250 | */ | |
251 | static inline | |
252 | struct cds_wfs_node * | |
711ff0f9 | 253 | ___cds_wfs_pop_with_state_nonblocking(cds_wfs_stack_ptr_t u_stack, int *state) |
c8975b94 | 254 | { |
711ff0f9 | 255 | return ___cds_wfs_pop(u_stack, state, 0); |
af67624d | 256 | } |
edac6b69 | 257 | |
af67624d MD |
258 | /* |
259 | * __cds_wfs_pop_nonblocking: pop a node from the stack. | |
260 | * | |
261 | * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if | |
262 | * it needs to block. | |
263 | */ | |
264 | static inline | |
265 | struct cds_wfs_node * | |
711ff0f9 | 266 | ___cds_wfs_pop_nonblocking(cds_wfs_stack_ptr_t u_stack) |
af67624d | 267 | { |
711ff0f9 | 268 | return ___cds_wfs_pop_with_state_nonblocking(u_stack, NULL); |
edac6b69 MD |
269 | } |
270 | ||
271 | /* | |
272 | * __cds_wfs_pop_all: pop all nodes from a stack. | |
273 | * | |
0e2125fb OD |
274 | * Operations after pop are consistent when observed before associated push. |
275 | * | |
edac6b69 MD |
276 | * __cds_wfs_pop_all does not require any synchronization with other |
277 | * push, nor with other __cds_wfs_pop_all, but requires synchronization | |
278 | * matching the technique used to synchronize __cds_wfs_pop_blocking: | |
279 | * | |
280 | * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical | |
281 | * section, both __cds_wfs_pop_blocking and cds_wfs_pop_all callers | |
282 | * must wait for a grace period to pass before freeing the returned | |
283 | * node or modifying the cds_wfs_node structure. However, no RCU | |
284 | * read-side critical section is needed around __cds_wfs_pop_all. | |
285 | * 2) Using mutual exclusion (e.g. mutexes) to protect | |
286 | * __cds_wfs_pop_blocking and __cds_wfs_pop_all callers. | |
287 | * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking() | |
288 | * and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme). | |
289 | */ | |
/*
 * Atomically detach every node from the stack, replacing the head with
 * the CDS_WFS_END sentinel. Returns the detached list head, or NULL if
 * the stack was already empty. See the synchronization requirements in
 * the comment above.
 */
static inline
struct cds_wfs_head *
___cds_wfs_pop_all(cds_wfs_stack_ptr_t u_stack)
{
	struct __cds_wfs_stack *s = u_stack._s;
	struct cds_wfs_head *head;

	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_xchg() in cds_wfs_push. It
	 * ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barriers issued
	 * prior to each uatomic_cmpxchg (which each write to head) take
	 * care of ordering writes to each node before the full memory
	 * barrier that follows this uatomic_xchg().
	 */
	head = uatomic_xchg_mo(&s->head, CDS_WFS_END, CMM_SEQ_CST);
	cmm_emit_legacy_smp_mb();
	if (___cds_wfs_end(head))
		return NULL;
	return head;
}
313 | ||
314 | /* | |
315 | * cds_wfs_pop_lock: lock stack pop-protection mutex. | |
316 | */ | |
317 | static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s) | |
294d3396 | 318 | { |
294d3396 MD |
319 | int ret; |
320 | ||
321 | ret = pthread_mutex_lock(&s->lock); | |
01477510 | 322 | urcu_posix_assert(!ret); |
edac6b69 MD |
323 | } |
324 | ||
325 | /* | |
326 | * cds_wfs_pop_unlock: unlock stack pop-protection mutex. | |
327 | */ | |
328 | static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s) | |
329 | { | |
330 | int ret; | |
331 | ||
294d3396 | 332 | ret = pthread_mutex_unlock(&s->lock); |
01477510 | 333 | urcu_posix_assert(!ret); |
edac6b69 MD |
334 | } |
335 | ||
336 | /* | |
c8975b94 | 337 | * Call __cds_wfs_pop_with_state_blocking with an internal pop mutex held. |
edac6b69 MD |
338 | */ |
339 | static inline | |
340 | struct cds_wfs_node * | |
c8975b94 | 341 | _cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state) |
edac6b69 MD |
342 | { |
343 | struct cds_wfs_node *retnode; | |
28757437 | 344 | cds_wfs_stack_ptr_t stack; |
edac6b69 MD |
345 | |
346 | _cds_wfs_pop_lock(s); | |
28757437 SM |
347 | stack.s = s; |
348 | retnode = ___cds_wfs_pop_with_state_blocking(stack, state); | |
edac6b69 | 349 | _cds_wfs_pop_unlock(s); |
294d3396 MD |
350 | return retnode; |
351 | } | |
352 | ||
c8975b94 MD |
353 | /* |
354 | * Call _cds_wfs_pop_with_state_blocking without saving any state. | |
355 | */ | |
356 | static inline | |
357 | struct cds_wfs_node * | |
358 | _cds_wfs_pop_blocking(struct cds_wfs_stack *s) | |
359 | { | |
360 | return _cds_wfs_pop_with_state_blocking(s, NULL); | |
361 | } | |
362 | ||
edac6b69 MD |
363 | /* |
364 | * Call __cds_wfs_pop_all with an internal pop mutex held. | |
365 | */ | |
366 | static inline | |
367 | struct cds_wfs_head * | |
368 | _cds_wfs_pop_all_blocking(struct cds_wfs_stack *s) | |
369 | { | |
370 | struct cds_wfs_head *rethead; | |
28757437 | 371 | cds_wfs_stack_ptr_t stack; |
edac6b69 MD |
372 | |
373 | _cds_wfs_pop_lock(s); | |
28757437 SM |
374 | stack.s = s; |
375 | rethead = ___cds_wfs_pop_all(stack); | |
edac6b69 MD |
376 | _cds_wfs_pop_unlock(s); |
377 | return rethead; | |
378 | } | |
379 | ||
380 | /* | |
c7ba06ba | 381 | * cds_wfs_first: get first node of a popped stack. |
edac6b69 MD |
382 | * |
383 | * Content written into the node before enqueue is guaranteed to be | |
384 | * consistent, but no other memory ordering is ensured. | |
385 | * | |
386 | * Used by for-like iteration macros in urcu/wfstack.h: | |
387 | * cds_wfs_for_each_blocking() | |
388 | * cds_wfs_for_each_blocking_safe() | |
8af2956c MD |
389 | * |
390 | * Returns NULL if popped stack is empty, top stack node otherwise. | |
edac6b69 MD |
391 | */ |
392 | static inline struct cds_wfs_node * | |
c7ba06ba | 393 | _cds_wfs_first(struct cds_wfs_head *head) |
edac6b69 MD |
394 | { |
395 | if (___cds_wfs_end(head)) | |
396 | return NULL; | |
397 | return &head->node; | |
398 | } | |
399 | ||
af67624d MD |
400 | static inline struct cds_wfs_node * |
401 | ___cds_wfs_next(struct cds_wfs_node *node, int blocking) | |
402 | { | |
403 | struct cds_wfs_node *next; | |
404 | ||
405 | next = ___cds_wfs_node_sync_next(node, blocking); | |
406 | /* | |
407 | * CDS_WFS_WOULDBLOCK != CSD_WFS_END, so we can check for end | |
408 | * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK, | |
409 | * and still return CDS_WFS_WOULDBLOCK. | |
410 | */ | |
411 | if (___cds_wfs_end(next)) | |
412 | return NULL; | |
413 | return next; | |
414 | } | |
415 | ||
edac6b69 MD |
416 | /* |
417 | * cds_wfs_next_blocking: get next node of a popped stack. | |
418 | * | |
419 | * Content written into the node before enqueue is guaranteed to be | |
420 | * consistent, but no other memory ordering is ensured. | |
421 | * | |
422 | * Used by for-like iteration macros in urcu/wfstack.h: | |
423 | * cds_wfs_for_each_blocking() | |
424 | * cds_wfs_for_each_blocking_safe() | |
8af2956c MD |
425 | * |
426 | * Returns NULL if reached end of popped stack, non-NULL next stack | |
427 | * node otherwise. | |
edac6b69 MD |
428 | */ |
/* Blocking iteration step; see the API comment above. */
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, /* blocking */ 1);
}
edac6b69 | 434 | |
af67624d MD |
435 | |
436 | /* | |
437 | * cds_wfs_next_nonblocking: get next node of a popped stack. | |
438 | * | |
439 | * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it | |
440 | * needs to block. | |
441 | */ | |
/* Non-blocking iteration step; may return CDS_WFS_WOULDBLOCK. */
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, /* blocking */ 0);
}
447 | ||
294d3396 MD |
448 | #ifdef __cplusplus |
449 | } | |
450 | #endif | |
451 | ||
edac6b69 | 452 | #endif /* _URCU_STATIC_WFSTACK_H */ |