#ifndef _URCU_STATIC_LFSTACK_H
#define _URCU_STATIC_LFSTACK_H
/*
 * urcu/static/lfstack.h
 *
 * Userspace RCU library - Lock-Free Stack
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
 * linking dynamically with the userspace rcu library.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdbool.h>
#include <pthread.h>

#include <urcu/assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>
/*
 * Stack implementing push, pop, pop_all operations, as well as iterator
 * on the stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 *                      cds_lfs_push  __cds_lfs_pop  __cds_lfs_pop_all
 * cds_lfs_push               -              -                  -
 * __cds_lfs_pop              -              X                  X
 * __cds_lfs_pop_all          -              X                  -
 *
 * cds_lfs_pop_blocking and cds_lfs_pop_all_blocking use an internal
 * mutex to provide synchronization.
 */
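/*
 * Example (illustrative sketch, not part of this API): a
 * multi-producer/single-consumer setup matching the single-consumer
 * technique (3) documented at __cds_lfs_pop below, using the public
 * wrappers from urcu/lfstack.h. The "my_item" type is a caller-side
 * assumption.
 *
 *	struct my_item {
 *		struct cds_lfs_node node;
 *		int value;
 *	};
 *
 *	static struct cds_lfs_stack stack;	// cds_lfs_init(&stack) at startup
 *
 *	// May be called concurrently from any number of producer threads.
 *	static void produce(struct my_item *item)
 *	{
 *		cds_lfs_node_init(&item->node);
 *		(void) cds_lfs_push(&stack, &item->node);
 *	}
 *
 *	// Called from the single consumer thread only, so neither RCU
 *	// nor a mutex is needed around __cds_lfs_pop.
 *	static struct my_item *consume(void)
 *	{
 *		struct cds_lfs_node *node = __cds_lfs_pop(&stack);
 *
 *		if (!node)
 *			return NULL;
 *		return caa_container_of(node, struct my_item, node);
 *	}
 */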
/*
 * cds_lfs_node_init: initialize lock-free stack node.
 */
static inline
void _cds_lfs_node_init(struct cds_lfs_node *node __attribute__((unused)))
{
}
/*
 * cds_lfs_init: initialize lock-free stack (with lock). Pair with
 * cds_lfs_destroy().
 */
static inline
void _cds_lfs_init(struct cds_lfs_stack *s)
{
	int ret;

	s->head = NULL;
	ret = pthread_mutex_init(&s->lock, NULL);
	urcu_posix_assert(!ret);
}
/*
 * cds_lfs_destroy: destroy lock-free stack (with lock). Pair with
 * cds_lfs_init().
 */
static inline
void _cds_lfs_destroy(struct cds_lfs_stack *s)
{
	int ret = pthread_mutex_destroy(&s->lock);

	urcu_posix_assert(!ret);
}
/*
 * ___cds_lfs_init: initialize lock-free stack (without lock).
 * Don't pair with any destroy function.
 */
static inline
void ___cds_lfs_init(struct __cds_lfs_stack *s)
{
	s->head = NULL;
}
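/*
 * Example (illustrative sketch): choosing between the two stack
 * flavors, via the public wrappers from urcu/lfstack.h. The variable
 * names are assumptions.
 *
 *	static struct cds_lfs_stack my_stack;		// has a pop mutex
 *	static struct __cds_lfs_stack my_mutexless_stack; // no mutex
 *
 *	static void setup(void)
 *	{
 *		cds_lfs_init(&my_stack);	// pair with cds_lfs_destroy()
 *		__cds_lfs_init(&my_mutexless_stack); // no destroy needed,
 *			// but the _blocking pop variants cannot be used
 *	}
 */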
static inline
bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
{
	return head == NULL;
}
/*
 * cds_lfs_empty: return whether lock-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline
bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
{
	return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s._s->head));
}
/*
 * cds_lfs_push: push a node into the stack.
 *
 * Does not require any synchronization with other push or pop
 * operations.
 *
 * Lock-free stack push is not subject to ABA problem, so no need to
 * take the RCU read-side lock. Even if "head" changes between two
 * uatomic_cmpxchg() invocations here (being popped, and then pushed
 * again by one or more concurrent threads), the second
 * uatomic_cmpxchg() invocation only cares about pushing a new entry at
 * the head of the stack, ensuring consistency by making sure the new
 * node->next is the same pointer value as the value replaced as head.
 * It does not care about the content of the actual next node, so it can
 * very well be reallocated between the two uatomic_cmpxchg().
 *
 * We take the approach of expecting the stack to be usually empty, so
 * we first try an initial uatomic_cmpxchg() on a NULL old_head, and
 * retry if the old head was non-NULL (the value read by the first
 * uatomic_cmpxchg() is used as old head for the following loop). The
 * upside of this scheme is to minimize the amount of cacheline traffic,
 * always performing an exclusive cacheline access, rather than doing
 * non-exclusive followed by exclusive cacheline access (which would be
 * required if we first read the old head value). This design decision
 * might be revisited after more thorough benchmarking on various
 * platforms.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
		struct cds_lfs_node *node)
{
	struct __cds_lfs_stack *s = u_s._s;
	struct cds_lfs_head *head = NULL;
	struct cds_lfs_head *new_head =
		caa_container_of(node, struct cds_lfs_head, node);

	for (;;) {
		struct cds_lfs_head *old_head = head;

		/*
		 * node->next is still private at this point, no need to
		 * perform a _CMM_STORE_SHARED().
		 */
		node->next = &head->node;
		/*
		 * uatomic_cmpxchg() implicit memory barrier orders earlier
		 * stores to node before publication.
		 */
		head = uatomic_cmpxchg(&s->head, old_head, new_head);
		if (old_head == head)
			break;
	}
	return !___cds_lfs_empty_head(head);
}
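/*
 * Example (illustrative sketch, assumed caller-side code): the push
 * return value can drive a wakeup, e.g. signal a consumer only on the
 * empty -> non-empty transition. "wake_consumer" is a hypothetical
 * helper.
 *
 *	static void enqueue_work(struct cds_lfs_stack *s,
 *			struct cds_lfs_node *node)
 *	{
 *		cds_lfs_node_init(node);
 *		if (!cds_lfs_push(s, node))
 *			wake_consumer();	// stack was previously empty
 *	}
 */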
/*
 * __cds_lfs_pop: pop a node from the stack.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_lfs_pop needs to be synchronized using one of the following
 * techniques:
 *
 * 1) Calling __cds_lfs_pop under rcu read lock critical section.
 *    Both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
 *    grace period to pass before freeing the returned node or pushing
 *    the node back into the stack. It is valid to overwrite the content
 *    of cds_lfs_node immediately after __cds_lfs_pop and
 *    __cds_lfs_pop_all. No RCU read-side critical section is needed
 *    around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop
 *    and __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all() (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;

	for (;;) {
		struct cds_lfs_head *head, *next_head;
		struct cds_lfs_node *next;

		head = _CMM_LOAD_SHARED(s->head);
		if (___cds_lfs_empty_head(head))
			return NULL;	/* Empty stack */

		/*
		 * Read head before head->next. Matches the implicit
		 * memory barrier before uatomic_cmpxchg() in
		 * cds_lfs_push.
		 */
		cmm_smp_read_barrier_depends();
		next = _CMM_LOAD_SHARED(head->node.next);
		next_head = caa_container_of(next,
				struct cds_lfs_head, node);
		if (uatomic_cmpxchg(&s->head, head, next_head) == head)
			return &head->node;
		/* busy-loop if head changed under us */
	}
}
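/*
 * Example (illustrative sketch, assumed caller-side code): popping
 * under an RCU read-side lock (technique 1). Freeing must go through
 * call_rcu() or wait for a grace period, since concurrent
 * __cds_lfs_pop callers may still dereference the node. "my_item" and
 * "free_item_rcu" are hypothetical.
 *
 *	static struct my_item *dequeue(struct cds_lfs_stack *s)
 *	{
 *		struct cds_lfs_node *node;
 *
 *		rcu_read_lock();
 *		node = __cds_lfs_pop(s);
 *		rcu_read_unlock();
 *		if (!node)
 *			return NULL;
 *		return caa_container_of(node, struct my_item, node);
 *	}
 *
 *	// Later, once done with the item:
 *	// call_rcu(&item->rcu_head, free_item_rcu);
 */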
/*
 * __cds_lfs_pop_all: pop all nodes from a stack.
 *
 * __cds_lfs_pop_all does not require any synchronization with other
 * push, nor with other __cds_lfs_pop_all, but requires synchronization
 * matching the technique used to synchronize __cds_lfs_pop:
 *
 * 1) If __cds_lfs_pop is called under rcu read lock critical section,
 *    both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
 *    grace period to pass before freeing the returned node or pushing
 *    the node back into the stack. It is valid to overwrite the content
 *    of cds_lfs_node immediately after __cds_lfs_pop and
 *    __cds_lfs_pop_all. No RCU read-side critical section is needed
 *    around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop and
 *    __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all() (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;

	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_cmpxchg() in cds_lfs_push. It
	 * ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barriers issued
	 * prior to each uatomic_cmpxchg() (which each write to head)
	 * order the writes to each node before the full memory barrier
	 * following this uatomic_xchg().
	 */
	return uatomic_xchg(&s->head, NULL);
}
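/*
 * Example (illustrative sketch, assumed caller-side code): draining a
 * stack and walking the returned list with cds_lfs_for_each_safe()
 * from urcu/lfstack.h, which tolerates freeing the current node.
 * "my_item" and "process_item" are hypothetical. If other threads pop
 * under RCU, wait for a grace period before freeing the items.
 *
 *	static void drain(struct cds_lfs_stack *s)
 *	{
 *		struct cds_lfs_head *head = __cds_lfs_pop_all(s);
 *		struct cds_lfs_node *node, *next;
 *
 *		cds_lfs_for_each_safe(head, node, next) {
 *			struct my_item *item =
 *				caa_container_of(node, struct my_item, node);
 *
 *			process_item(item);	// may free "item"
 *		}
 *	}
 */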
/*
 * cds_lfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_lock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	urcu_posix_assert(!ret);
}
/*
 * cds_lfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_unlock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	urcu_posix_assert(!ret);
}
/*
 * Call __cds_lfs_pop with an internal pop mutex held.
 */
static inline
struct cds_lfs_node *
_cds_lfs_pop_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_node *retnode;
	cds_lfs_stack_ptr_t stack;

	_cds_lfs_pop_lock(s);
	stack.s = s;
	retnode = ___cds_lfs_pop(stack);
	_cds_lfs_pop_unlock(s);
	return retnode;
}
/*
 * Call __cds_lfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_lfs_head *
_cds_lfs_pop_all_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_head *rethead;
	cds_lfs_stack_ptr_t stack;

	_cds_lfs_pop_lock(s);
	stack.s = s;
	rethead = ___cds_lfs_pop_all(stack);
	_cds_lfs_pop_unlock(s);
	return rethead;
}
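/*
 * Example (illustrative sketch, assumed caller-side code): when
 * multiple threads must pop and neither RCU nor a single consumer is
 * available, the _blocking variants from urcu/lfstack.h serialize
 * pop/pop_all through the stack's internal mutex; no other
 * synchronization against concurrent cds_lfs_push() is needed.
 * "my_stack" is an assumed cds_lfs_stack initialized with
 * cds_lfs_init().
 *
 *	struct cds_lfs_node *node = cds_lfs_pop_blocking(&my_stack);
 *	struct cds_lfs_head *all = cds_lfs_pop_all_blocking(&my_stack);
 */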
#endif /* _URCU_STATIC_LFSTACK_H */