#ifndef _URCU_STATIC_LFSTACK_H
#define _URCU_STATIC_LFSTACK_H

/*
 * urcu/static/lfstack.h
 *
 * Userspace RCU library - Lock-Free Stack
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfstack.h for linking
 * dynamically with the userspace rcu library.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/uatomic.h>
#include <urcu-pointer.h>

#ifdef __cplusplus
extern "C" {
#endif

static inline
void _cds_lfs_node_init(struct cds_lfs_node *node)
{
}

static inline
void _cds_lfs_init(struct cds_lfs_stack *s)
{
	s->head = NULL;
}

/*
 * cds_lfs_push: push a node into the stack.
 *
 * Does not require any synchronization with other push or pop
 * operations.
 *
 * Lock-free stack push is not subject to the ABA problem, so there is
 * no need to take the RCU read-side lock. Even if "head" changes
 * between two uatomic_cmpxchg() invocations here (being popped, and
 * then pushed again by one or more concurrent threads), the second
 * uatomic_cmpxchg() invocation only cares about pushing a new entry at
 * the head of the stack, ensuring consistency by making sure the new
 * node->next is the same pointer value as the value replaced as head.
 * It does not care about the content of the actual next node, so that
 * node can very well be reallocated between the two uatomic_cmpxchg()
 * invocations.
 *
 * We take the approach of expecting the stack to be usually empty, so
 * we first try an initial uatomic_cmpxchg() on a NULL old_head, and
 * retry if the old head was non-NULL (the value read by the first
 * uatomic_cmpxchg() is used as old head for the following loop). The
 * upside of this scheme is to minimize the amount of cacheline traffic,
 * always performing an exclusive cacheline access, rather than doing
 * non-exclusive followed by exclusive cacheline access (which would be
 * required if we first read the old head value). This design decision
 * might be revisited after more thorough benchmarking on various
 * platforms.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 *
 * An illustrative usage sketch follows the function below.
 */
static inline
int _cds_lfs_push(struct cds_lfs_stack *s,
		  struct cds_lfs_node *node)
{
	struct cds_lfs_node *head = NULL;

	for (;;) {
		struct cds_lfs_node *old_head = head;

		/*
		 * node->next is still private at this point, no need to
		 * perform a _CMM_STORE_SHARED().
		 */
		node->next = head;
		/*
		 * uatomic_cmpxchg() implicit memory barrier orders earlier
		 * stores to node before publication.
		 */
		head = uatomic_cmpxchg(&s->head, old_head, node);
		if (old_head == head)
			break;
	}
	return (int) !!((unsigned long) head);
}

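/*
 * Illustrative usage sketch (not part of this header): a producer
 * embeds a cds_lfs_node in its own structure and pushes it. The
 * "struct myobj" type and wake_consumer() helper below are
 * hypothetical, and application code typically calls the
 * non-underscored cds_lfs_* wrappers rather than these static
 * versions. The push return value tells whether the stack was empty
 * beforehand, which can be used, for example, to wake up a consumer
 * only when needed:
 *
 *	struct myobj {
 *		int value;
 *		struct cds_lfs_node lfs_node;
 *	};
 *
 *	static void myobj_produce(struct cds_lfs_stack *stack,
 *				  struct myobj *obj, int value)
 *	{
 *		obj->value = value;
 *		_cds_lfs_node_init(&obj->lfs_node);
 *		if (!_cds_lfs_push(stack, &obj->lfs_node))
 *			wake_consumer();
 *	}
 */
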
/*
 * cds_lfs_pop: pop a node from the stack.
 *
 * Returns NULL if stack is empty.
 *
 * cds_lfs_pop needs to be synchronized using one of the following
 * techniques:
 *
 * 1) Calling cds_lfs_pop under rcu read lock critical section. The
 *    caller must wait for a grace period to pass before freeing the
 *    returned node or modifying the cds_lfs_node structure.
 * 2) Using mutual exclusion (e.g. mutexes) to protect cds_lfs_pop
 *    callers.
 * 3) Ensuring that only ONE thread can call cds_lfs_pop()
 *    (multi-provider/single-consumer scheme).
 *
 * An illustrative usage sketch of scheme 1) follows the function below.
 */
static inline
struct cds_lfs_node *_cds_lfs_pop(struct cds_lfs_stack *s)
{
	for (;;) {
		struct cds_lfs_node *head;

		head = _CMM_LOAD_SHARED(s->head);
		if (head) {
			struct cds_lfs_node *next;

			/*
			 * Read head before head->next. Matches the
			 * implicit memory barrier before
			 * uatomic_cmpxchg() in cds_lfs_push.
			 */
			cmm_smp_read_barrier_depends();
			next = _CMM_LOAD_SHARED(head->next);
			if (uatomic_cmpxchg(&s->head, head, next) == head) {
				return head;
			} else {
				/* Concurrent modification. Retry. */
				continue;
			}
		} else {
			/* Empty stack */
			return NULL;
		}
	}
}

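/*
 * Illustrative usage sketch (not part of this header), showing
 * synchronization scheme 1) above: the pop itself runs under an RCU
 * read-side critical section, and a grace period elapses before the
 * popped node may be freed or its cds_lfs_node reused. It assumes the
 * calling thread is registered with a urcu flavor providing
 * rcu_read_lock(), rcu_read_unlock() and synchronize_rcu(), and reuses
 * the hypothetical "struct myobj" from the push sketch above
 * (caa_container_of() comes from urcu/compiler.h):
 *
 *	static struct myobj *myobj_consume(struct cds_lfs_stack *stack)
 *	{
 *		struct cds_lfs_node *node;
 *
 *		rcu_read_lock();
 *		node = _cds_lfs_pop(stack);
 *		rcu_read_unlock();
 *		if (!node)
 *			return NULL;
 *		synchronize_rcu();
 *		return caa_container_of(node, struct myobj, lfs_node);
 *	}
 *
 * Calling synchronize_rcu() on every pop is a simplification for the
 * sake of the example; real code would typically batch reclamation or
 * use call_rcu()/defer_rcu() instead.
 */
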
#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_LFSTACK_H */