lfstack: relax constraints on node re-use
#ifndef _URCU_STATIC_LFSTACK_H
#define _URCU_STATIC_LFSTACK_H

/*
 * urcu/static/lfstack.h
 *
 * Userspace RCU library - Lock-Free Stack
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
 * linking dynamically with the userspace rcu library.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>

#ifdef __cplusplus
extern "C" {
#endif
/*
 * Lock-free stack.
 *
 * Stack implementing push, pop, and pop_all operations, as well as an
 * iterator on the stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * The external synchronization techniques described in the API below
 * are required between pairs marked with "X". No external
 * synchronization is required between pairs marked with "-".
 *
 *                       cds_lfs_push  __cds_lfs_pop  __cds_lfs_pop_all
 * cds_lfs_push               -              -                 -
 * __cds_lfs_pop              -              X                 X
 * __cds_lfs_pop_all          -              X                 -
 *
 * cds_lfs_pop_blocking and cds_lfs_pop_all_blocking use an internal
 * mutex to provide synchronization.
 */

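/*
 * Example (illustrative sketch, not part of this header): a
 * multi-producer/single-consumer usage following synchronization
 * technique 3 documented with __cds_lfs_pop below. "struct myelem",
 * enqueue() and dequeue() are hypothetical application code; any
 * thread may call enqueue() concurrently, but only one designated
 * consumer thread may call dequeue().
 *
 *	struct myelem {
 *		int value;
 *		struct cds_lfs_node node;
 *	};
 *
 *	static struct cds_lfs_stack stack;
 *
 *	static void enqueue(struct myelem *elem)
 *	{
 *		cds_lfs_node_init(&elem->node);
 *		cds_lfs_push(&stack, &elem->node);
 *	}
 *
 *	static struct myelem *dequeue(void)
 *	{
 *		struct cds_lfs_node *node = __cds_lfs_pop(&stack);
 *
 *		if (!node)
 *			return NULL;
 *		return caa_container_of(node, struct myelem, node);
 *	}
 *
 * The stack must have been initialized once with cds_lfs_init(&stack)
 * before first use.
 */
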
/*
 * cds_lfs_node_init: initialize lock-free stack node.
 */
static inline
void _cds_lfs_node_init(struct cds_lfs_node *node)
{
}

/*
 * cds_lfs_init: initialize lock-free stack.
 */
static inline
void _cds_lfs_init(struct cds_lfs_stack *s)
{
	int ret;

	s->head = NULL;
	ret = pthread_mutex_init(&s->lock, NULL);
	assert(!ret);
}

/*
 * ___cds_lfs_init: initialize lock-free stack.
 */
static inline
void ___cds_lfs_init(struct __cds_lfs_stack *s)
{
	s->head = NULL;
}

static inline
bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
{
	return head == NULL;
}

/*
 * cds_lfs_empty: return whether lock-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline
bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
{
	return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s._s->head));
}

/*
 * cds_lfs_push: push a node into the stack.
 *
 * Does not require any synchronization with other push or pop
 * operations.
 *
 * Lock-free stack push is not subject to the ABA problem, so there is
 * no need to take the RCU read-side lock. Even if "head" changes
 * between two uatomic_cmpxchg() invocations here (being popped, and
 * then pushed again by one or more concurrent threads), the second
 * uatomic_cmpxchg() invocation only cares about pushing a new entry at
 * the head of the stack, ensuring consistency by making sure the new
 * node->next is the same pointer value as the value replaced as head.
 * It does not care about the content of the actual next node, which
 * can therefore be reallocated between the two uatomic_cmpxchg()
 * invocations.
 *
 * We take the approach of expecting the stack to be usually empty, so
 * we first try an initial uatomic_cmpxchg() on a NULL old_head, and
 * retry if the old head was non-NULL (the value read by the first
 * uatomic_cmpxchg() is used as old head for the following loop). The
 * upside of this scheme is to minimize the amount of cacheline traffic,
 * always performing an exclusive cacheline access, rather than doing
 * non-exclusive followed by exclusive cacheline access (which would be
 * required if we first read the old head value). This design decision
 * might be revisited after more thorough benchmarking on various
 * platforms.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
		  struct cds_lfs_node *node)
{
	struct __cds_lfs_stack *s = u_s._s;
	struct cds_lfs_head *head = NULL;
	struct cds_lfs_head *new_head =
		caa_container_of(node, struct cds_lfs_head, node);

	for (;;) {
		struct cds_lfs_head *old_head = head;

		/*
		 * node->next is still private at this point, no need to
		 * perform a _CMM_STORE_SHARED().
		 */
		node->next = &head->node;
		/*
		 * uatomic_cmpxchg() implicit memory barrier orders earlier
		 * stores to node before publication.
		 */
		head = uatomic_cmpxchg(&s->head, old_head, new_head);
		if (old_head == head)
			break;
	}
	return !___cds_lfs_empty_head(head);
}

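/*
 * Example (illustrative sketch): the boolean returned by cds_lfs_push
 * can be used to detect the empty-to-non-empty transition, e.g. to
 * wake up a consumer only when the stack becomes non-empty.
 * wakeup_consumer() is a hypothetical application-provided function.
 *
 *	static void enqueue_and_signal(struct cds_lfs_stack *stack,
 *			struct cds_lfs_node *node)
 *	{
 *		if (!cds_lfs_push(stack, node))
 *			wakeup_consumer();
 *	}
 */
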
/*
 * __cds_lfs_pop: pop a node from the stack.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_lfs_pop needs to be synchronized using one of the following
 * techniques:
 *
 * 1) Calling __cds_lfs_pop under rcu read lock critical section.
 *    Both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
 *    grace period to pass before freeing the returned node or pushing
 *    the node back into the stack. It is valid to overwrite the content
 *    of cds_lfs_node immediately after __cds_lfs_pop and
 *    __cds_lfs_pop_all. No RCU read-side critical section is needed
 *    around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop
 *    and __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;

	for (;;) {
		struct cds_lfs_head *head, *next_head;
		struct cds_lfs_node *next;

		head = _CMM_LOAD_SHARED(s->head);
		if (___cds_lfs_empty_head(head))
			return NULL;	/* Empty stack */

		/*
		 * Read head before head->next. Matches the implicit
		 * memory barrier before uatomic_cmpxchg() in
		 * cds_lfs_push.
		 */
		cmm_smp_read_barrier_depends();
		next = _CMM_LOAD_SHARED(head->node.next);
		next_head = caa_container_of(next,
				struct cds_lfs_head, node);
		if (uatomic_cmpxchg(&s->head, head, next_head) == head)
			return &head->node;
		/* busy-loop if head changed under us */
	}
}

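/*
 * Example (illustrative sketch): popping under an RCU read-side lock,
 * i.e. synchronization technique 1 above. This assumes a urcu flavor
 * has been initialized and the calling thread is registered as a
 * reader. A grace period must elapse before the returned node is
 * freed or pushed back; synchronize_rcu() is used here for
 * simplicity, but call_rcu() would work as well. "struct myelem" is
 * hypothetical.
 *
 *	struct cds_lfs_node *node;
 *
 *	rcu_read_lock();
 *	node = __cds_lfs_pop(&stack);
 *	rcu_read_unlock();
 *	if (node) {
 *		struct myelem *elem =
 *			caa_container_of(node, struct myelem, node);
 *
 *		synchronize_rcu();
 *		free(elem);
 *	}
 */
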
/*
 * __cds_lfs_pop_all: pop all nodes from a stack.
 *
 * __cds_lfs_pop_all does not require any synchronization with push
 * operations, nor with other __cds_lfs_pop_all invocations, but it
 * requires synchronization matching the technique used to synchronize
 * __cds_lfs_pop:
 *
 * 1) If __cds_lfs_pop is called under rcu read lock critical section,
 *    both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
 *    grace period to pass before freeing the returned node or pushing
 *    the node back into the stack. It is valid to overwrite the content
 *    of cds_lfs_node immediately after __cds_lfs_pop and
 *    __cds_lfs_pop_all. No RCU read-side critical section is needed
 *    around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop and
 *    __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;

	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_cmpxchg() in cds_lfs_push. It
	 * ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barriers issued
	 * prior to each uatomic_cmpxchg (which each write to head) take
	 * care of ordering writes to each node before the full memory
	 * barrier after this uatomic_xchg().
	 */
	return uatomic_xchg(&s->head, NULL);
}

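/*
 * Example (illustrative sketch): draining the stack with
 * __cds_lfs_pop_all, then walking the returned singly-linked list
 * through the node "next" pointers (the list is NULL-terminated).
 * Under the single-consumer scheme (technique 3) each element may be
 * freed as soon as its next pointer has been read; under the RCU
 * scheme a grace period must elapse first. "struct myelem" is
 * hypothetical.
 *
 *	struct cds_lfs_head *head = __cds_lfs_pop_all(&stack);
 *	struct cds_lfs_node *node = head ? &head->node : NULL;
 *
 *	while (node) {
 *		struct cds_lfs_node *next = node->next;
 *		struct myelem *elem =
 *			caa_container_of(node, struct myelem, node);
 *
 *		free(elem);
 *		node = next;
 *	}
 */
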
/*
 * cds_lfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_lock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	assert(!ret);
}

/*
 * cds_lfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_unlock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	assert(!ret);
}

/*
 * Call __cds_lfs_pop with an internal pop mutex held.
 */
static inline
struct cds_lfs_node *
_cds_lfs_pop_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_node *retnode;

	_cds_lfs_pop_lock(s);
	retnode = ___cds_lfs_pop(s);
	_cds_lfs_pop_unlock(s);
	return retnode;
}

/*
 * Call __cds_lfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_lfs_head *
_cds_lfs_pop_all_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_head *rethead;

	_cds_lfs_pop_lock(s);
	rethead = ___cds_lfs_pop_all(s);
	_cds_lfs_pop_unlock(s);
	return rethead;
}

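/*
 * Example (illustrative sketch): the *_blocking helpers bundle
 * synchronization technique 2. The internal pop mutex serializes all
 * poppers, so, provided every popper goes through the _blocking
 * variants, any thread may pop and may free the returned element
 * right away. "struct myelem" is hypothetical.
 *
 *	struct cds_lfs_node *node = cds_lfs_pop_blocking(&stack);
 *
 *	if (node) {
 *		struct myelem *elem =
 *			caa_container_of(node, struct myelem, node);
 *
 *		free(elem);
 *	}
 */
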
#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_LFSTACK_H */