Fix: lfstack reversed empty/non-empty return value
#ifndef _URCU_STATIC_LFSTACK_H
#define _URCU_STATIC_LFSTACK_H

/*
 * urcu/static/lfstack.h
 *
 * Userspace RCU library - Lock-Free Stack
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
 * linking dynamically with the userspace rcu library.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>

#ifdef __cplusplus
extern "C" {
#endif

#define CDS_LFS_END	NULL

/*
 * Lock-free stack.
 *
 * Stack implementing push, pop, pop_all operations, as well as iterator
 * on the stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * The external synchronization techniques described in the API below
 * are required between pairs marked with "X". No external
 * synchronization is required between pairs marked with "-".
 *
 *                      cds_lfs_push  __cds_lfs_pop  __cds_lfs_pop_all
 * cds_lfs_push               -              -                 -
 * __cds_lfs_pop              -              X                 X
 * __cds_lfs_pop_all          -              X                 -
 *
 * cds_lfs_pop_blocking and cds_lfs_pop_all_blocking use an internal
 * mutex to provide synchronization.
 */
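
/*
 * Example usage (a minimal sketch for illustration, not part of the
 * API): a cds_lfs_node is typically embedded into a user-defined
 * structure, which is retrieved back with caa_container_of(). The
 * "struct myitem" type and "push_item" helper below are hypothetical
 * names.
 *
 *	#include <stdlib.h>
 *	#include <urcu/lfstack.h>
 *
 *	struct myitem {
 *		int value;
 *		struct cds_lfs_node node;
 *	};
 *
 *	static struct cds_lfs_stack stack;	// cds_lfs_init(&stack) once
 *
 *	static void push_item(int value)
 *	{
 *		struct myitem *item = malloc(sizeof(*item));
 *
 *		assert(item);
 *		item->value = value;
 *		cds_lfs_node_init(&item->node);
 *		cds_lfs_push(&stack, &item->node);	// safe from any thread
 *	}
 */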

/*
 * cds_lfs_node_init: initialize lock-free stack node.
 */
static inline
void _cds_lfs_node_init(struct cds_lfs_node *node)
{
	/*
	 * Nothing to do: node->next is unconditionally overwritten when
	 * the node is pushed onto a stack.
	 */
}

/*
 * cds_lfs_init: initialize lock-free stack.
 */
static inline
void _cds_lfs_init(struct cds_lfs_stack *s)
{
	int ret;

	s->head = CDS_LFS_END;
	ret = pthread_mutex_init(&s->lock, NULL);
	assert(!ret);
}

/*
 * ___cds_lfs_init: initialize lock-free stack.
 */
static inline
void ___cds_lfs_init(struct __cds_lfs_stack *s)
{
	s->head = CDS_LFS_END;
}

static inline
bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
{
	return head == CDS_LFS_END;
}

/*
 * cds_lfs_empty: return whether lock-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline
bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
{
	return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s._s->head));
}

/*
 * cds_lfs_push: push a node into the stack.
 *
 * Does not require any synchronization with other push or pop
 * operations.
 *
 * Lock-free stack push is not subject to the ABA problem, so there is
 * no need to take the RCU read-side lock. Even if "head" changes
 * between two uatomic_cmpxchg() invocations here (being popped, and
 * then pushed again by one or more concurrent threads), the second
 * uatomic_cmpxchg() invocation only cares about pushing a new entry at
 * the head of the stack, ensuring consistency by making sure the new
 * node->next is the same pointer value as the value replaced as head.
 * It does not care about the content of the actual next node, so it can
 * very well be reallocated between the two uatomic_cmpxchg().
 *
 * We take the approach of expecting the stack to be usually empty, so
 * we first try an initial uatomic_cmpxchg() on a NULL old_head, and
 * retry if the old head was non-NULL (the value read by the first
 * uatomic_cmpxchg() is used as the old head for the following loop
 * iteration). The upside of this scheme is to minimize the amount of
 * cacheline traffic, always performing an exclusive cacheline access,
 * rather than doing non-exclusive followed by exclusive cacheline
 * access (which would be required if we first read the old head value).
 * This design decision might be revisited after more thorough
 * benchmarking on various platforms.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
		struct cds_lfs_node *node)
{
	struct __cds_lfs_stack *s = u_s._s;
	struct cds_lfs_head *head = CDS_LFS_END;
	struct cds_lfs_head *new_head =
		caa_container_of(node, struct cds_lfs_head, node);

	for (;;) {
		struct cds_lfs_head *old_head = head;

		/*
		 * node->next is still private at this point, no need to
		 * perform a _CMM_STORE_SHARED().
		 */
		node->next = &head->node;
		/*
		 * uatomic_cmpxchg() implicit memory barrier orders earlier
		 * stores to node before publication.
		 */
		head = uatomic_cmpxchg(&s->head, old_head, new_head);
		if (old_head == head)
			break;
	}
	return !___cds_lfs_empty_head(head);
}
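
/*
 * Example (a sketch): the return value allows a producer to detect the
 * empty -> non-empty transition, e.g. to wake up a consumer blocked on
 * an empty stack. The "wakeup_consumer" helper is a hypothetical name
 * used for illustration only.
 *
 *	cds_lfs_node_init(&item->node);
 *	if (!cds_lfs_push(&stack, &item->node)) {
 *		// The stack was empty before this push: a consumer may
 *		// be waiting for work, so signal it.
 *		wakeup_consumer();
 *	}
 */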

/*
 * __cds_lfs_pop: pop a node from the stack.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_lfs_pop needs to be synchronized using one of the following
 * techniques:
 *
 * 1) Calling __cds_lfs_pop within an RCU read-side critical section.
 *    The caller must wait for a grace period to pass before freeing
 *    the returned node or modifying the cds_lfs_node structure. (See
 *    the sketch following the function below.)
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop
 *    and __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;

	for (;;) {
		struct cds_lfs_head *head, *next_head;
		struct cds_lfs_node *next;

		head = _CMM_LOAD_SHARED(s->head);
		if (___cds_lfs_empty_head(head))
			return NULL;	/* Empty stack */

		/*
		 * Read head before head->next. Matches the implicit
		 * memory barrier before uatomic_cmpxchg() in
		 * cds_lfs_push.
		 */
		cmm_smp_read_barrier_depends();
		next = _CMM_LOAD_SHARED(head->node.next);
		next_head = caa_container_of(next,
				struct cds_lfs_head, node);
		if (uatomic_cmpxchg(&s->head, head, next_head) == head)
			return &head->node;
		/* busy-loop if head changed under us */
	}
}
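
/*
 * Example (a sketch of synchronization technique 1 above): pop within
 * an RCU read-side critical section and defer reclaim with call_rcu().
 * Assumes a urcu flavor header (e.g. urcu.h) is included, and that the
 * hypothetical "struct myitem" embeds a struct rcu_head next to its
 * cds_lfs_node; "free_item" is a hypothetical call_rcu callback.
 *
 *	struct cds_lfs_node *snode;
 *
 *	rcu_read_lock();
 *	snode = __cds_lfs_pop(&stack);
 *	rcu_read_unlock();
 *	if (snode) {
 *		struct myitem *item =
 *			caa_container_of(snode, struct myitem, node);
 *
 *		// The node now belongs to this thread, but concurrent
 *		// __cds_lfs_pop() callers may still dereference it, so
 *		// wait for a grace period before freeing it.
 *		call_rcu(&item->rcu_head, free_item);
 *	}
 */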

/*
 * __cds_lfs_pop_all: pop all nodes from a stack.
 *
 * __cds_lfs_pop_all does not require any synchronization with other
 * push, nor with other __cds_lfs_pop_all, but requires synchronization
 * matching the technique used to synchronize __cds_lfs_pop:
 *
 * 1) If __cds_lfs_pop is called within an RCU read-side critical
 *    section, both __cds_lfs_pop and __cds_lfs_pop_all callers must
 *    wait for a grace period to pass before freeing the returned node
 *    or modifying the cds_lfs_node structure. However, no RCU
 *    read-side critical section is needed around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop and
 *    __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;

	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_cmpxchg() in cds_lfs_push. It
	 * ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list: the full memory barrier issued prior to
	 * each uatomic_cmpxchg() that writes to head orders the writes
	 * to each node before the full memory barrier that follows this
	 * uatomic_xchg().
	 */
	return uatomic_xchg(&s->head, CDS_LFS_END);
}
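
/*
 * Example (a sketch): iterating on the list returned by
 * __cds_lfs_pop_all() in a single-consumer scheme (technique 3 above),
 * where nodes can be freed immediately; urcu/lfstack.h also provides
 * cds_lfs_for_each()/cds_lfs_for_each_safe() iterators for this
 * purpose. "struct myitem" is the hypothetical type from the sketches
 * above.
 *
 *	struct cds_lfs_head *lhead;
 *	struct cds_lfs_node *snode, *snext;
 *
 *	lhead = __cds_lfs_pop_all(&stack);
 *	for (snode = lhead ? &lhead->node : NULL; snode; snode = snext) {
 *		struct myitem *item =
 *			caa_container_of(snode, struct myitem, node);
 *
 *		snext = snode->next;	// save before freeing the item
 *		free(item);
 *	}
 */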

/*
 * cds_lfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_lock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	assert(!ret);
}

/*
 * cds_lfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_unlock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	assert(!ret);
}

/*
 * Call __cds_lfs_pop with an internal pop mutex held.
 */
static inline
struct cds_lfs_node *
_cds_lfs_pop_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_node *retnode;

	_cds_lfs_pop_lock(s);
	retnode = ___cds_lfs_pop(s);
	_cds_lfs_pop_unlock(s);
	return retnode;
}

/*
 * Call __cds_lfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_lfs_head *
_cds_lfs_pop_all_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_head *rethead;

	_cds_lfs_pop_lock(s);
	rethead = ___cds_lfs_pop_all(s);
	_cds_lfs_pop_unlock(s);
	return rethead;
}

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_LFSTACK_H */