Update wfstack copyright notice
[urcu.git] / urcu / static / wfstack.h
1 #ifndef _URCU_STATIC_WFSTACK_H
2 #define _URCU_STATIC_WFSTACK_H
3
4 /*
5 * urcu/static/wfstack.h
6 *
 * Userspace RCU library - Stack with wait-free push, blocking traversal.
8 *
9 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
10 * linking dynamically with the userspace rcu library.
11 *
12 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * This library is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU Lesser General Public
16 * License as published by the Free Software Foundation; either
17 * version 2.1 of the License, or (at your option) any later version.
18 *
19 * This library is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * Lesser General Public License for more details.
23 *
24 * You should have received a copy of the GNU Lesser General Public
25 * License along with this library; if not, write to the Free Software
26 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29 #include <pthread.h>
30 #include <assert.h>
31 #include <poll.h>
32 #include <stdbool.h>
33 #include <urcu/compiler.h>
34 #include <urcu/uatomic.h>
35
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39
40 #define CDS_WFS_END ((void *) 0x1UL)
41 #define CDS_WFS_ADAPT_ATTEMPTS 10 /* Retry if being set */
42 #define CDS_WFS_WAIT 10 /* Wait 10 ms if being set */
43
44 /*
45 * Stack with wait-free push, blocking traversal.
46 *
47 * Stack implementing push, pop, pop_all operations, as well as iterator
48 * on the stack head returned by pop_all.
49 *
50 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all.
51 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, iteration on stack
52 * head returned by pop_all.
53 *
54 * Synchronization table:
55 *
56 * External synchronization techniques described in the API below is
57 * required between pairs marked with "X". No external synchronization
58 * required between pairs marked with "-".
59 *
60 * cds_wfs_push __cds_wfs_pop __cds_wfs_pop_all
61 * cds_wfs_push - - -
62 * __cds_wfs_pop - X X
63 * __cds_wfs_pop_all - X -
64 *
65 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
66 * synchronization.
67 */
68
69 /*
70 * cds_wfs_node_init: initialize wait-free stack node.
71 */
72 static inline
73 void _cds_wfs_node_init(struct cds_wfs_node *node)
74 {
75 node->next = NULL;
76 }
77
78 /*
79 * cds_wfs_init: initialize wait-free stack.
80 */
81 static inline
82 void _cds_wfs_init(struct cds_wfs_stack *s)
83 {
84 int ret;
85
86 s->head = CDS_WFS_END;
87 ret = pthread_mutex_init(&s->lock, NULL);
88 assert(!ret);
89 }
90
91 static inline bool ___cds_wfs_end(void *node)
92 {
93 return node == CDS_WFS_END;
94 }
95
96 /*
97 * cds_wfs_empty: return whether wait-free stack is empty.
98 *
99 * No memory barrier is issued. No mutual exclusion is required.
100 */
101 static inline bool _cds_wfs_empty(struct cds_wfs_stack *s)
102 {
103 return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
104 }
105
106 /*
107 * cds_wfs_push: push a node into the stack.
108 *
109 * Issues a full memory barrier before push. No mutual exclusion is
110 * required.
111 *
112 * Returns 0 if the stack was empty prior to adding the node.
113 * Returns non-zero otherwise.
114 */
115 static inline
116 int _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
117 {
118 struct cds_wfs_head *old_head, *new_head;
119
120 assert(node->next == NULL);
121 new_head = caa_container_of(node, struct cds_wfs_head, node);
122 /*
123 * uatomic_xchg() implicit memory barrier orders earlier stores
124 * to node (setting it to NULL) before publication.
125 */
126 old_head = uatomic_xchg(&s->head, new_head);
127 /*
128 * At this point, dequeuers see a NULL node->next, they should
129 * busy-wait until node->next is set to old_head.
130 */
131 CMM_STORE_SHARED(node->next, &old_head->node);
132 return !___cds_wfs_end(old_head);
133 }
134
135 /*
136 * Waiting for push to complete enqueue and return the next node.
137 */
138 static inline struct cds_wfs_node *
139 ___cds_wfs_node_sync_next(struct cds_wfs_node *node)
140 {
141 struct cds_wfs_node *next;
142 int attempt = 0;
143
144 /*
145 * Adaptative busy-looping waiting for push to complete.
146 */
147 while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
148 if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
149 poll(NULL, 0, CDS_WFS_WAIT); /* Wait for 10ms */
150 attempt = 0;
151 } else {
152 caa_cpu_relax();
153 }
154 }
155
156 return next;
157 }
158
159 /*
160 * __cds_wfs_pop_blocking: pop a node from the stack.
161 *
162 * Returns NULL if stack is empty.
163 *
164 * __cds_wfs_pop_blocking needs to be synchronized using one of the
165 * following techniques:
166 *
167 * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical
168 * section. The caller must wait for a grace period to pass before
169 * freeing the returned node or modifying the cds_wfs_node structure.
170 * 2) Using mutual exclusion (e.g. mutexes) to protect
171 * __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
172 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
173 * and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
174 */
175 static inline
176 struct cds_wfs_node *
177 ___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
178 {
179 struct cds_wfs_head *head, *new_head;
180 struct cds_wfs_node *next;
181
182 for (;;) {
183 head = CMM_LOAD_SHARED(s->head);
184 if (___cds_wfs_end(head))
185 return NULL;
186 next = ___cds_wfs_node_sync_next(&head->node);
187 new_head = caa_container_of(next, struct cds_wfs_head, node);
188 if (uatomic_cmpxchg(&s->head, head, new_head) == head)
189 return &head->node;
190 /* busy-loop if head changed under us */
191 }
192 }
193
194 /*
195 * __cds_wfs_pop_all: pop all nodes from a stack.
196 *
197 * __cds_wfs_pop_all does not require any synchronization with other
198 * push, nor with other __cds_wfs_pop_all, but requires synchronization
199 * matching the technique used to synchronize __cds_wfs_pop_blocking:
200 *
201 * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical
202 * section, both __cds_wfs_pop_blocking and cds_wfs_pop_all callers
203 * must wait for a grace period to pass before freeing the returned
204 * node or modifying the cds_wfs_node structure. However, no RCU
205 * read-side critical section is needed around __cds_wfs_pop_all.
206 * 2) Using mutual exclusion (e.g. mutexes) to protect
207 * __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
208 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
209 * and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
210 */
211 static inline
212 struct cds_wfs_head *
213 ___cds_wfs_pop_all(struct cds_wfs_stack *s)
214 {
215 struct cds_wfs_head *head;
216
217 /*
218 * Implicit memory barrier after uatomic_xchg() matches implicit
219 * memory barrier before uatomic_xchg() in cds_wfs_push. It
220 * ensures that all nodes of the returned list are consistent.
221 * There is no need to issue memory barriers when iterating on
222 * the returned list, because the full memory barrier issued
223 * prior to each uatomic_cmpxchg, which each write to head, are
224 * taking care to order writes to each node prior to the full
225 * memory barrier after this uatomic_xchg().
226 */
227 head = uatomic_xchg(&s->head, CDS_WFS_END);
228 if (___cds_wfs_end(head))
229 return NULL;
230 return head;
231 }
232
233 /*
234 * cds_wfs_pop_lock: lock stack pop-protection mutex.
235 */
236 static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s)
237 {
238 int ret;
239
240 ret = pthread_mutex_lock(&s->lock);
241 assert(!ret);
242 }
243
244 /*
245 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
246 */
247 static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
248 {
249 int ret;
250
251 ret = pthread_mutex_unlock(&s->lock);
252 assert(!ret);
253 }
254
255 /*
256 * Call __cds_wfs_pop_blocking with an internal pop mutex held.
257 */
258 static inline
259 struct cds_wfs_node *
260 _cds_wfs_pop_blocking(struct cds_wfs_stack *s)
261 {
262 struct cds_wfs_node *retnode;
263
264 _cds_wfs_pop_lock(s);
265 retnode = ___cds_wfs_pop_blocking(s);
266 _cds_wfs_pop_unlock(s);
267 return retnode;
268 }
269
270 /*
271 * Call __cds_wfs_pop_all with an internal pop mutex held.
272 */
273 static inline
274 struct cds_wfs_head *
275 _cds_wfs_pop_all_blocking(struct cds_wfs_stack *s)
276 {
277 struct cds_wfs_head *rethead;
278
279 _cds_wfs_pop_lock(s);
280 rethead = ___cds_wfs_pop_all(s);
281 _cds_wfs_pop_unlock(s);
282 return rethead;
283 }
284
285 /*
286 * cds_wfs_first_blocking: get first node of a popped stack.
287 *
288 * Content written into the node before enqueue is guaranteed to be
289 * consistent, but no other memory ordering is ensured.
290 *
291 * Used by for-like iteration macros in urcu/wfstack.h:
292 * cds_wfs_for_each_blocking()
293 * cds_wfs_for_each_blocking_safe()
294 */
295 static inline struct cds_wfs_node *
296 _cds_wfs_first_blocking(struct cds_wfs_head *head)
297 {
298 if (___cds_wfs_end(head))
299 return NULL;
300 return &head->node;
301 }
302
303 /*
304 * cds_wfs_next_blocking: get next node of a popped stack.
305 *
306 * Content written into the node before enqueue is guaranteed to be
307 * consistent, but no other memory ordering is ensured.
308 *
309 * Used by for-like iteration macros in urcu/wfstack.h:
310 * cds_wfs_for_each_blocking()
311 * cds_wfs_for_each_blocking_safe()
312 */
313 static inline struct cds_wfs_node *
314 _cds_wfs_next_blocking(struct cds_wfs_node *node)
315 {
316 struct cds_wfs_node *next;
317
318 next = ___cds_wfs_node_sync_next(node);
319 if (___cds_wfs_end(next))
320 return NULL;
321 return next;
322 }
323
324 #ifdef __cplusplus
325 }
326 #endif
327
328 #endif /* _URCU_STATIC_WFSTACK_H */
This page took 0.035458 seconds and 5 git commands to generate.