// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_STATIC_LFSTACK_H
#define _URCU_STATIC_LFSTACK_H

/*
 * Userspace RCU library - Lock-Free Stack
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
 * linking dynamically with the userspace rcu library.
 */

#include <stdbool.h>
#include <pthread.h>
#include <urcu/assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Lock-free stack.
 *
 * Stack implementing push, pop, pop_all operations, as well as iterator
 * on the stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 *                      cds_lfs_push  __cds_lfs_pop  __cds_lfs_pop_all
 * cds_lfs_push               -             -                -
 * __cds_lfs_pop              -             X                X
 * __cds_lfs_pop_all          -             X                -
 *
 * cds_lfs_pop_blocking and cds_lfs_pop_all_blocking use an internal
 * mutex to provide synchronization.
 */

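/*
 * Usage sketch (illustrative only): callers typically embed
 * struct cds_lfs_node in their own type and recover the enclosing
 * object with caa_container_of(). The "struct myobj" type, the
 * "mystack" stack and the myobj_from_node() helper below are
 * hypothetical names reused by the sketches further down.
 *
 *	struct myobj {
 *		int value;
 *		struct cds_lfs_node node;
 *	};
 *
 *	static struct cds_lfs_stack mystack;	// stack with internal mutex
 *
 *	static struct myobj *myobj_from_node(struct cds_lfs_node *node)
 *	{
 *		return caa_container_of(node, struct myobj, node);
 *	}
 */
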
/*
 * cds_lfs_node_init: initialize lock-free stack node.
 */
static inline
void _cds_lfs_node_init(struct cds_lfs_node *node __attribute__((unused)))
{
}

/*
 * cds_lfs_init: initialize lock-free stack (with lock). Pair with
 * cds_lfs_destroy().
 */
static inline
void _cds_lfs_init(struct cds_lfs_stack *s)
{
	int ret;

	s->head = NULL;
	ret = pthread_mutex_init(&s->lock, NULL);
	urcu_posix_assert(!ret);
}

/*
 * cds_lfs_destroy: destroy lock-free stack (with lock). Pair with
 * cds_lfs_init().
 */
static inline
void _cds_lfs_destroy(struct cds_lfs_stack *s)
{
	int ret = pthread_mutex_destroy(&s->lock);
	urcu_posix_assert(!ret);
}

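/*
 * A minimal lifetime sketch for the stack variant with internal mutex,
 * assuming the non-underscore-prefixed wrappers declared in
 * urcu/lfstack.h are used when linking dynamically (mystack is the
 * hypothetical stack declared above):
 *
 *	cds_lfs_init(&mystack);
 *	// ... concurrent push/pop traffic ...
 *	cds_lfs_destroy(&mystack);	// once no thread accesses the stack anymore
 */
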
/*
 * ___cds_lfs_init: initialize lock-free stack (without lock).
 * Don't pair with any destroy function.
 */
static inline
void ___cds_lfs_init(struct __cds_lfs_stack *s)
{
	s->head = NULL;
}

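/*
 * The mutex-free variant only needs an init call and, as noted above,
 * has no destroy counterpart (a sketch, using the __cds_lfs_init()
 * wrapper from urcu/lfstack.h):
 *
 *	struct __cds_lfs_stack lfstack;
 *
 *	__cds_lfs_init(&lfstack);
 */
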
static inline
bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
{
	return head == NULL;
}

/*
 * cds_lfs_empty: return whether lock-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline
bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
{
	return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s._s->head));
}

/*
 * cds_lfs_push: push a node into the stack.
 *
 * Does not require any synchronization with other push or pop
 * operations.
 *
 * Lock-free stack push is not subject to the ABA problem, so there is
 * no need to take the RCU read-side lock. Even if "head" changes
 * between two uatomic_cmpxchg() invocations here (being popped, and
 * then pushed again by one or more concurrent threads), the second
 * uatomic_cmpxchg() invocation only cares about pushing a new entry at
 * the head of the stack, ensuring consistency by making sure the new
 * node->next is the same pointer value as the value replaced as head.
 * It does not care about the content of the actual next node, so it can
 * very well be reallocated between the two uatomic_cmpxchg().
 *
 * We take the approach of expecting the stack to be usually empty, so
 * we first try an initial uatomic_cmpxchg() on a NULL old_head, and
 * retry if the old head was non-NULL (the value read by the first
 * uatomic_cmpxchg() is used as old head for the following loop). The
 * upside of this scheme is to minimize the amount of cacheline traffic,
 * always performing an exclusive cacheline access, rather than doing
 * non-exclusive followed by exclusive cacheline access (which would be
 * required if we first read the old head value). This design decision
 * might be revisited after more thorough benchmarking on various
 * platforms.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
		  struct cds_lfs_node *node)
{
	struct __cds_lfs_stack *s = u_s._s;
	struct cds_lfs_head *head = NULL;
	struct cds_lfs_head *new_head =
		caa_container_of(node, struct cds_lfs_head, node);

	for (;;) {
		struct cds_lfs_head *old_head = head;

		/*
		 * node->next is still private at this point, no need to
		 * perform a _CMM_STORE_SHARED().
		 */
		node->next = &head->node;
		/*
		 * uatomic_cmpxchg() implicit memory barrier orders earlier
		 * stores to node before publication.
		 */
		head = uatomic_cmpxchg(&s->head, old_head, new_head);
		if (old_head == head)
			break;
	}
	return !___cds_lfs_empty_head(head);
}

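/*
 * Push sketch: the return value can be used to detect the
 * empty -> non-empty transition, e.g. to wake up a sleeping consumer.
 * The myobj/mystack names are the hypothetical ones introduced above;
 * wakeup_consumer() is likewise hypothetical.
 *
 *	static void myobj_enqueue(struct myobj *obj)
 *	{
 *		cds_lfs_node_init(&obj->node);
 *		if (!cds_lfs_push(&mystack, &obj->node))
 *			wakeup_consumer();	// stack was empty before this push
 *	}
 */
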
/*
 * __cds_lfs_pop: pop a node from the stack.
 *
 * Returns NULL if stack is empty.
 *
 * __cds_lfs_pop needs to be synchronized using one of the following
 * techniques:
 *
 * 1) Calling __cds_lfs_pop under rcu read lock critical section.
 *    Both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
 *    grace period to pass before freeing the returned node or pushing
 *    the node back into the stack. It is valid to overwrite the content
 *    of cds_lfs_node immediately after __cds_lfs_pop and
 *    __cds_lfs_pop_all. No RCU read-side critical section is needed
 *    around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop
 *    and __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;

	for (;;) {
		struct cds_lfs_head *head, *next_head;
		struct cds_lfs_node *next;

		head = _CMM_LOAD_SHARED(s->head);
		if (___cds_lfs_empty_head(head))
			return NULL;	/* Empty stack */

		/*
		 * Read head before head->next. Matches the implicit
		 * memory barrier before uatomic_cmpxchg() in
		 * cds_lfs_push.
		 */
		cmm_smp_read_barrier_depends();
		next = _CMM_LOAD_SHARED(head->node.next);
		next_head = caa_container_of(next,
				struct cds_lfs_head, node);
		if (uatomic_cmpxchg(&s->head, head, next_head) == head)
			return &head->node;
		/* busy-loop if head changed under us */
	}
}

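/*
 * Pop sketch for synchronization technique 1 (RCU): the pop itself runs
 * under the RCU read-side lock, and the caller waits for a grace period
 * before freeing or re-pushing the node. Assumes one of the liburcu
 * flavors is linked in; myobj_from_node() is the hypothetical helper
 * shown above.
 *
 *	static struct myobj *myobj_dequeue(void)
 *	{
 *		struct cds_lfs_node *node;
 *
 *		rcu_read_lock();
 *		node = __cds_lfs_pop(&mystack);
 *		rcu_read_unlock();
 *		if (!node)
 *			return NULL;
 *		return myobj_from_node(node);
 *	}
 *
 *	// Before freeing or re-pushing the returned object:
 *	//	synchronize_rcu();	// or use call_rcu()
 *	//	free(obj);
 */
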
/*
 * __cds_lfs_pop_all: pop all nodes from a stack.
 *
 * __cds_lfs_pop_all does not require any synchronization with other
 * push, nor with other __cds_lfs_pop_all, but requires synchronization
 * matching the technique used to synchronize __cds_lfs_pop:
 *
 * 1) If __cds_lfs_pop is called under rcu read lock critical section,
 *    both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
 *    grace period to pass before freeing the returned node or pushing
 *    the node back into the stack. It is valid to overwrite the content
 *    of cds_lfs_node immediately after __cds_lfs_pop and
 *    __cds_lfs_pop_all. No RCU read-side critical section is needed
 *    around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop and
 *    __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;

	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_cmpxchg() in cds_lfs_push. It
	 * ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barriers issued
	 * prior to each uatomic_cmpxchg(), which each write to head,
	 * take care of ordering writes to each node before the full
	 * memory barrier that follows this uatomic_xchg().
	 */
	return uatomic_xchg(&s->head, NULL);
}

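/*
 * pop_all sketch: the returned cds_lfs_head is the former top of the
 * stack, its nodes are chained through the next pointers set by push,
 * and the bottom node's next pointer is NULL, so the detached list can
 * be walked without further synchronization with concurrent pushers
 * (urcu/lfstack.h also provides cds_lfs_for_each()/cds_lfs_for_each_safe()
 * iterators for this kind of walk). Freeing the nodes still follows the
 * same rule as for __cds_lfs_pop above. consume() is a hypothetical
 * callback; myobj_from_node() and mystack are the hypothetical names
 * introduced above.
 *
 *	struct cds_lfs_head *head;
 *	struct cds_lfs_node *node, *next;
 *
 *	head = __cds_lfs_pop_all(&mystack);
 *	for (node = head ? &head->node : NULL; node; node = next) {
 *		next = node->next;
 *		consume(myobj_from_node(node));
 *	}
 */
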
/*
 * cds_lfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_lock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * cds_lfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_unlock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * Call __cds_lfs_pop with an internal pop mutex held.
 */
static inline
struct cds_lfs_node *
_cds_lfs_pop_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_node *retnode;
	cds_lfs_stack_ptr_t stack;

	_cds_lfs_pop_lock(s);
	stack.s = s;
	retnode = ___cds_lfs_pop(stack);
	_cds_lfs_pop_unlock(s);
	return retnode;
}

/*
 * Call __cds_lfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_lfs_head *
_cds_lfs_pop_all_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_head *rethead;
	cds_lfs_stack_ptr_t stack;

	_cds_lfs_pop_lock(s);
	stack.s = s;
	rethead = ___cds_lfs_pop_all(stack);
	_cds_lfs_pop_unlock(s);
	return rethead;
}

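/*
 * Blocking-variant sketch (synchronization technique 2): consumers of a
 * struct cds_lfs_stack can simply call these _blocking helpers (exposed
 * as cds_lfs_pop_blocking()/cds_lfs_pop_all_blocking() by
 * urcu/lfstack.h), which serialize all poppers on the stack's internal
 * mutex, so no RCU read-side critical section nor external lock is
 * needed around them; mystack is the hypothetical stack declared above.
 *
 *	struct cds_lfs_node *node;
 *	struct cds_lfs_head *all;
 *
 *	node = cds_lfs_pop_blocking(&mystack);		// pop a single node
 *	all = cds_lfs_pop_all_blocking(&mystack);	// detach the whole stack
 */
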
#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_LFSTACK_H */