#ifndef _URCU_STATIC_WFSTACK_H
#define _URCU_STATIC_WFSTACK_H

/*
 * urcu/static/wfstack.h
 *
 * Userspace RCU library - Stack with wait-free push, blocking traversal.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
 * linking dynamically with the userspace rcu library.
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <assert.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

#define CDS_WFS_END                     ((void *) 0x1UL)
#define CDS_WFS_ADAPT_ATTEMPTS          10      /* Retry if being set */
#define CDS_WFS_WAIT                    10      /* Wait 10 ms if being set */

/*
 * Stack with wait-free push, blocking traversal.
 *
 * Stack implementing push, pop, pop_all operations, as well as an
 * iterator on the stack head returned by pop_all.
 *
 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty,
 * cds_wfs_first.
 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next,
 * iteration on stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 *                      cds_wfs_push  __cds_wfs_pop  __cds_wfs_pop_all
 * cds_wfs_push               -             -                -
 * __cds_wfs_pop              -             X                X
 * __cds_wfs_pop_all          -             X                -
 *
 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
 * synchronization.
 */

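/*
 * Illustrative usage sketch (editor's addition, fenced off from
 * compilation): a minimal multi-producer/single-consumer setup going
 * through the public wrappers declared in urcu/wfstack.h, which map
 * onto the _cds_wfs_* functions defined below. The "myjob" structure
 * and the produce()/consume_all() helpers are hypothetical.
 */
#if 0   /* usage sketch only */
#include <stdlib.h>
#include <urcu/wfstack.h>
#include <urcu/compiler.h>      /* caa_container_of() */

struct myjob {
        int id;
        struct cds_wfs_node wfs_node;   /* embedded stack node */
};

static struct cds_wfs_stack jobs;       /* cds_wfs_init(&jobs) before use */

static void produce(int id)
{
        struct myjob *job = malloc(sizeof(*job));

        job->id = id;
        cds_wfs_node_init(&job->wfs_node);
        cds_wfs_push(&jobs, &job->wfs_node);    /* wait-free, any thread */
}

static void consume_all(void)
{
        struct cds_wfs_node *node;

        /*
         * cds_wfs_pop_blocking() takes the internal pop mutex, so no
         * external synchronization is required (see table above).
         */
        while ((node = cds_wfs_pop_blocking(&jobs)) != NULL)
                free(caa_container_of(node, struct myjob, wfs_node));
}
#endif
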
/*
 * cds_wfs_node_init: initialize wait-free stack node.
 */
static inline
void _cds_wfs_node_init(struct cds_wfs_node *node)
{
        node->next = NULL;
}

/*
 * cds_wfs_init: initialize wait-free stack. Pair with
 * cds_wfs_destroy().
 */
static inline
void _cds_wfs_init(struct cds_wfs_stack *s)
{
        int ret;

        s->head = CDS_WFS_END;
        ret = pthread_mutex_init(&s->lock, NULL);
        assert(!ret);
}

/*
 * cds_wfs_destroy: destroy wait-free stack. Pair with
 * cds_wfs_init().
 */
static inline
void _cds_wfs_destroy(struct cds_wfs_stack *s)
{
        int ret = pthread_mutex_destroy(&s->lock);
        assert(!ret);
}

static inline bool ___cds_wfs_end(void *node)
{
        return node == CDS_WFS_END;
}

/*
 * cds_wfs_empty: return whether wait-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline bool _cds_wfs_empty(struct cds_wfs_stack *s)
{
        return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
}

/*
 * cds_wfs_push: push a node into the stack.
 *
 * Issues a full memory barrier before push. No mutual exclusion is
 * required.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
int _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
{
        struct cds_wfs_head *old_head, *new_head;

        assert(node->next == NULL);
        new_head = caa_container_of(node, struct cds_wfs_head, node);
        /*
         * uatomic_xchg() implicit memory barrier orders earlier stores
         * to node (setting its next pointer to NULL) before publication.
         */
        old_head = uatomic_xchg(&s->head, new_head);
        /*
         * At this point, dequeuers see a NULL node->next; they should
         * busy-wait until node->next is set to old_head.
         */
        CMM_STORE_SHARED(node->next, &old_head->node);
        return !___cds_wfs_end(old_head);
}

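/*
 * Illustrative sketch (editor's addition, fenced off from compilation):
 * using the push return value documented above to detect the
 * empty-to-non-empty transition, e.g. to wake up a sleeping consumer.
 * wake_consumer() is a hypothetical notification function.
 */
#if 0   /* usage sketch only */
static void enqueue(struct cds_wfs_stack *s, struct cds_wfs_node *node)
{
        cds_wfs_node_init(node);
        if (cds_wfs_push(s, node) == 0) {
                /* The stack was empty before this push. */
                wake_consumer();
        }
}
#endif
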
/*
 * Wait for a push to complete its enqueue, then return the next node.
 */
static inline struct cds_wfs_node *
___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
{
        struct cds_wfs_node *next;
        int attempt = 0;

        /*
         * Adaptive busy-looping, waiting for the push to complete.
         */
        while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
                if (!blocking)
                        return CDS_WFS_WOULDBLOCK;
                if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
                        poll(NULL, 0, CDS_WFS_WAIT);    /* Wait for 10ms */
                        attempt = 0;
                } else {
                        caa_cpu_relax();
                }
        }

        return next;
}

static inline
struct cds_wfs_node *
___cds_wfs_pop(struct cds_wfs_stack *s, int *state, int blocking)
{
        struct cds_wfs_head *head, *new_head;
        struct cds_wfs_node *next;

        if (state)
                *state = 0;
        for (;;) {
                head = CMM_LOAD_SHARED(s->head);
                if (___cds_wfs_end(head)) {
                        return NULL;
                }
                next = ___cds_wfs_node_sync_next(&head->node, blocking);
                if (!blocking && next == CDS_WFS_WOULDBLOCK) {
                        return CDS_WFS_WOULDBLOCK;
                }
                new_head = caa_container_of(next, struct cds_wfs_head, node);
                if (uatomic_cmpxchg(&s->head, head, new_head) == head) {
                        if (state && ___cds_wfs_end(new_head))
                                *state |= CDS_WFS_STATE_LAST;
                        return &head->node;
                }
                if (!blocking) {
                        return CDS_WFS_WOULDBLOCK;
                }
                /* busy-loop if head changed under us */
        }
}

/*
 * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_wfs_pop_blocking needs to be synchronized using one of the
 * following techniques:
 *
 * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical
 *    section. The caller must wait for a grace period to pass before
 *    freeing the returned node or modifying the cds_wfs_node structure.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
        return ___cds_wfs_pop(s, state, 1);
}

static inline
struct cds_wfs_node *
___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
        return ___cds_wfs_pop_with_state_blocking(s, NULL);
}

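/*
 * Illustrative sketch (editor's addition, fenced off from compilation):
 * technique 1 above, calling __cds_wfs_pop_blocking from concurrent
 * popping threads under an RCU read-side critical section, then waiting
 * for a grace period before the popped node may be freed or reused.
 * Assumes each calling thread is registered with the RCU flavor that
 * provides rcu_read_lock()/rcu_read_unlock()/synchronize_rcu(); the
 * "myjob" structure is hypothetical.
 */
#if 0   /* usage sketch only */
#include <urcu.h>
#include <urcu/wfstack.h>
#include <urcu/compiler.h>      /* caa_container_of() */

static struct myjob *pop_job(struct cds_wfs_stack *s)
{
        struct cds_wfs_node *node;

        rcu_read_lock();
        node = __cds_wfs_pop_blocking(s);
        rcu_read_unlock();
        if (!node)
                return NULL;    /* stack was empty */
        /*
         * Other poppers may still be traversing this node; wait for a
         * grace period before handing it back for free/reuse.
         */
        synchronize_rcu();
        return caa_container_of(node, struct myjob, wfs_node);
}
#endif
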
/*
 * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_with_state_blocking, but returns
 * CDS_WFS_WOULDBLOCK if it needs to block.
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_nonblocking(struct cds_wfs_stack *s, int *state)
{
        return ___cds_wfs_pop(s, state, 0);
}

/*
 * __cds_wfs_pop_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
 * it needs to block.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_nonblocking(struct cds_wfs_stack *s)
{
        return ___cds_wfs_pop_with_state_nonblocking(s, NULL);
}

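/*
 * Illustrative sketch (editor's addition, fenced off from compilation):
 * handling the three possible results of __cds_wfs_pop_nonblocking.
 * The nonblocking variants need the same external synchronization as
 * the blocking ones; this sketch assumes a single popping thread.
 * process_node() is a hypothetical handler.
 */
#if 0   /* usage sketch only */
static void drain_nonblocking(struct cds_wfs_stack *s)
{
        struct cds_wfs_node *node;

        for (;;) {
                node = __cds_wfs_pop_nonblocking(s);
                if (node == CDS_WFS_WOULDBLOCK)
                        break;  /* concurrent push not yet linked; retry later */
                if (node == NULL)
                        break;  /* stack is empty */
                process_node(node);
        }
}
#endif
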
/*
 * __cds_wfs_pop_all: pop all nodes from a stack.
 *
 * __cds_wfs_pop_all does not require any synchronization with other
 * push operations, nor with other __cds_wfs_pop_all, but requires
 * synchronization matching the technique used to synchronize
 * __cds_wfs_pop_blocking:
 *
 * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical
 *    section, both __cds_wfs_pop_blocking and cds_wfs_pop_all callers
 *    must wait for a grace period to pass before freeing the returned
 *    node or modifying the cds_wfs_node structure. However, no RCU
 *    read-side critical section is needed around __cds_wfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_wfs_head *
___cds_wfs_pop_all(struct cds_wfs_stack *s)
{
        struct cds_wfs_head *head;

        /*
         * Implicit memory barrier after uatomic_xchg() matches implicit
         * memory barrier before uatomic_xchg() in cds_wfs_push. It
         * ensures that all nodes of the returned list are consistent.
         * There is no need to issue memory barriers when iterating on
         * the returned list, because the full memory barriers issued
         * prior to each uatomic_cmpxchg, which each write to head, take
         * care of ordering writes to each node before the full memory
         * barrier after this uatomic_xchg().
         */
        head = uatomic_xchg(&s->head, CDS_WFS_END);
        if (___cds_wfs_end(head))
                return NULL;
        return head;
}

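/*
 * Illustrative sketch (editor's addition, fenced off from compilation):
 * emptying the stack in a single wait-free operation, then iterating
 * over the returned list with the cds_wfs_for_each_blocking_safe()
 * macro from urcu/wfstack.h, which tolerates freeing the current node.
 * The "myjob" structure is hypothetical; the synchronization rules
 * listed above still apply to __cds_wfs_pop_all callers.
 */
#if 0   /* usage sketch only */
#include <stdlib.h>
#include <urcu/wfstack.h>
#include <urcu/compiler.h>      /* caa_container_of() */

static void drain_all(struct cds_wfs_stack *s)
{
        struct cds_wfs_head *head;
        struct cds_wfs_node *node, *next;

        head = __cds_wfs_pop_all(s);
        if (!head)
                return;         /* stack was empty */
        /* Nodes are returned in LIFO order (most recent push first). */
        cds_wfs_for_each_blocking_safe(head, node, next) {
                struct myjob *job = caa_container_of(node, struct myjob, wfs_node);

                free(job);
        }
}
#endif
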
/*
 * cds_wfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s)
{
        int ret;

        ret = pthread_mutex_lock(&s->lock);
        assert(!ret);
}

/*
 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
{
        int ret;

        ret = pthread_mutex_unlock(&s->lock);
        assert(!ret);
}

/*
 * Call __cds_wfs_pop_with_state_blocking with an internal pop mutex held.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
        struct cds_wfs_node *retnode;

        _cds_wfs_pop_lock(s);
        retnode = ___cds_wfs_pop_with_state_blocking(s, state);
        _cds_wfs_pop_unlock(s);
        return retnode;
}

/*
 * Call _cds_wfs_pop_with_state_blocking without saving any state.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
        return _cds_wfs_pop_with_state_blocking(s, NULL);
}

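/*
 * Illustrative sketch (editor's addition, fenced off from compilation):
 * using the mutex-protected cds_wfs_pop_with_state_blocking() wrapper
 * and the CDS_WFS_STATE_LAST flag from urcu/wfstack.h to detect that a
 * pop just removed the last node. stack_became_empty() is a
 * hypothetical callback.
 */
#if 0   /* usage sketch only */
static struct cds_wfs_node *pop_and_track(struct cds_wfs_stack *s)
{
        struct cds_wfs_node *node;
        int state;

        node = cds_wfs_pop_with_state_blocking(s, &state);
        if (node && (state & CDS_WFS_STATE_LAST))
                stack_became_empty();   /* this pop emptied the stack */
        return node;
}
#endif
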
/*
 * Call __cds_wfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_wfs_head *
_cds_wfs_pop_all_blocking(struct cds_wfs_stack *s)
{
        struct cds_wfs_head *rethead;

        _cds_wfs_pop_lock(s);
        rethead = ___cds_wfs_pop_all(s);
        _cds_wfs_pop_unlock(s);
        return rethead;
}

/*
 * cds_wfs_first: get first node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if popped stack is empty, top stack node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_first(struct cds_wfs_head *head)
{
        if (___cds_wfs_end(head))
                return NULL;
        return &head->node;
}

static inline struct cds_wfs_node *
___cds_wfs_next(struct cds_wfs_node *node, int blocking)
{
        struct cds_wfs_node *next;

        next = ___cds_wfs_node_sync_next(node, blocking);
        /*
         * CDS_WFS_WOULDBLOCK != CDS_WFS_END, so we can check for end
         * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
         * and still return CDS_WFS_WOULDBLOCK.
         */
        if (___cds_wfs_end(next))
                return NULL;
        return next;
}

/*
 * cds_wfs_next_blocking: get next node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if reached end of popped stack, non-NULL next stack
 * node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
        return ___cds_wfs_next(node, 1);
}

/*
 * cds_wfs_next_nonblocking: get next node of a popped stack.
 *
 * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
 * needs to block.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
        return ___cds_wfs_next(node, 0);
}

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_WFSTACK_H */