waitqueue: add in_waitqueue field
[userspace-rcu.git] / urcu/waitqueue-lifo.h
#ifndef _URCU_WAITQUEUE_LIFO_H
#define _URCU_WAITQUEUE_LIFO_H

/*
 * urcu/waitqueue-lifo.h
 *
 * Userspace RCU library - wait queue scheme with LIFO semantic
 *
 * Copyright (c) 2012-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdbool.h>	/* bool */
#include <stdint.h>	/* int32_t */
#include <assert.h>	/* assert() */
#include <errno.h>	/* ENOENT */
#include <poll.h>	/* poll() */

#include <urcu/uatomic.h>
#include <urcu/lfstack.h>
#include <urcu/futex.h>

/*
 * Number of busy-loop attempts to perform before falling back to a
 * futex wait, for grace period batching.
 */
#define URCU_WAIT_ATTEMPTS 1000

enum urcu_wait_state {
        /* URCU_WAIT_WAITING is compared directly (futex compares it). */
        URCU_WAIT_WAITING = 0,
        /* Non-zero values are used as bit masks. */
        URCU_WAIT_WAKEUP = (1 << 0),
        URCU_WAIT_RUNNING = (1 << 1),
        URCU_WAIT_TEARDOWN = (1 << 2),
};

struct urcu_wait_node {
        struct cds_lfs_node node;
        int32_t state;		/* enum urcu_wait_state */
        int in_waitqueue;	/* Set when added to a wait queue, cleared when dequeued. */
};

#define URCU_WAIT_NODE_INIT(name, _state)	\
        { .state = _state }

#define DEFINE_URCU_WAIT_NODE(name, state)	\
        struct urcu_wait_node name = URCU_WAIT_NODE_INIT(name, state)

#define DECLARE_URCU_WAIT_NODE(name)		\
        struct urcu_wait_node name

struct urcu_wait_queue {
        struct __cds_lfs_stack stack;
};

#define URCU_WAIT_QUEUE_HEAD_INIT(name)		\
        { .stack.head = CDS_LFS_END, }

#define DECLARE_URCU_WAIT_QUEUE(name)		\
        struct urcu_wait_queue name

#define DEFINE_URCU_WAIT_QUEUE(name)		\
        struct urcu_wait_queue name = URCU_WAIT_QUEUE_HEAD_INIT(name)

static inline
void urcu_wait_queue_init(struct urcu_wait_queue *queue)
{
        __cds_lfs_init(&queue->stack);
}

struct urcu_waiters {
        struct cds_lfs_head *head;
};

/*
 * Atomically add the calling thread's wait node to a wait queue.
 * Return false if the queue was previously empty, true otherwise.
 * A full memory barrier is issued before the node is added to the wait
 * queue.
 */
static inline
bool urcu_wait_add(struct urcu_wait_queue *queue,
                struct urcu_wait_node *node)
{
        cds_lfs_node_init(&node->node);
        CMM_STORE_SHARED(node->in_waitqueue, true);
        return cds_lfs_push(&queue->stack, &node->node);
}

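/*
 * Typical "first waiter performs the work" batching pattern built on
 * this API. This is a minimal, illustrative sketch only:
 * wait_for_batched_work(), do_batched_work() and the gp_waiters queue
 * are hypothetical names, not part of this header.
 *
 *	static DEFINE_URCU_WAIT_QUEUE(gp_waiters);
 *
 *	static void wait_for_batched_work(void)
 *	{
 *		DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
 *		struct urcu_waiters waiters;
 *
 *		if (urcu_wait_add(&gp_waiters, &wait) != 0) {
 *			// Not first in queue: another thread does the
 *			// work on our behalf and wakes us when done.
 *			urcu_adaptative_busy_wait(&wait);
 *			cmm_smp_mb();	// order accesses after the work
 *			return;
 *		}
 *		// First in queue: we won't need to wake ourself up.
 *		urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);
 *		urcu_move_waiters(&waiters, &gp_waiters);
 *		do_batched_work();
 *		(void) urcu_wake_all_waiters(&waiters);
 *	}
 */
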
/*
 * Atomically move all waiters from wait queue into our local struct
 * urcu_waiters.
 */
static inline
void urcu_move_waiters(struct urcu_waiters *waiters,
                struct urcu_wait_queue *queue)
{
        waiters->head = __cds_lfs_pop_all(&queue->stack);
}

static inline
void urcu_wait_set_state(struct urcu_wait_node *node,
                enum urcu_wait_state state)
{
        node->state = state;
}

static inline
void urcu_wait_or_state(struct urcu_wait_node *node,
                enum urcu_wait_state state)
{
        uatomic_or(&node->state, state);
}

static inline
void urcu_wait_node_init(struct urcu_wait_node *node,
                enum urcu_wait_state state)
{
        urcu_wait_set_state(node, state);
        cds_lfs_node_init(&node->node);
        node->in_waitqueue = false;
}

static inline
bool urcu_in_waitqueue(struct urcu_wait_node *node)
{
        return CMM_LOAD_SHARED(node->in_waitqueue);
}

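/*
 * Illustrative use of urcu_in_waitqueue() (a sketch only; when and how a
 * caller consults this flag is application-specific):
 *
 *	if (urcu_in_waitqueue(&wait)) {
 *		// Node is still linked in the wait queue: a later
 *		// urcu_dequeue_wake_*() or urcu_wake_all_waiters() pass
 *		// is expected to handle its wakeup.
 *	} else {
 *		// Node has already been dequeued.
 *	}
 */
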
/*
 * Note: urcu_adaptative_wake_up needs the wait node to stay allocated
 * throughout its execution. In this scheme, the waiter owns the node
 * memory, and we only allow it to free this memory when it receives the
 * URCU_WAIT_TEARDOWN flag.
 * Return true if a wakeup is performed, false if the thread was already
 * running.
 */
static inline
bool urcu_adaptative_wake_up(struct urcu_wait_node *wait)
{
        bool wakeup_performed = false;

        cmm_smp_mb();
        /*
         * An "or" of the WAKEUP flag rather than a "set" is useful for
         * multiple concurrent wakeup sources. Note that "WAIT_TEARDOWN"
         * becomes useless when we use multiple wakeup sources: the
         * lifetime of the wait node should then be handled by the
         * caller.
         */
        uatomic_or(&wait->state, URCU_WAIT_WAKEUP);
        if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING)) {
                futex_noasync(&wait->state, FUTEX_WAKE, 1, NULL, NULL, 0);
                wakeup_performed = true;
        }
        /* Allow teardown of struct urcu_wait_node memory. */
        uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
        return wakeup_performed;
}

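/*
 * Waker-side sketch (illustrative only; wake_one() is a hypothetical
 * caller function, not part of this header):
 *
 *	static void wake_one(struct urcu_wait_node *wait)
 *	{
 *		// Publish the condition the waiter polls on before this
 *		// call; urcu_adaptative_wake_up() issues a full memory
 *		// barrier before setting URCU_WAIT_WAKEUP.
 *		(void) urcu_adaptative_wake_up(wait);
 *		// Do not touch *wait after this point: once
 *		// URCU_WAIT_TEARDOWN is set, the waiter is allowed to
 *		// reclaim the node memory at any time.
 *	}
 */
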
/*
 * Caller must initialize the wait node's state to URCU_WAIT_WAITING
 * before passing its memory to the waker thread.
 */
static inline
void urcu_adaptative_busy_wait(struct urcu_wait_node *wait)
{
        unsigned int i;

        /* Load and test the condition before reading the state. */
        cmm_smp_rmb();
        for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
                if (uatomic_read(&wait->state) != URCU_WAIT_WAITING)
                        goto skip_futex_wait;
                caa_cpu_relax();
        }
        futex_noasync(&wait->state, FUTEX_WAIT,
                        URCU_WAIT_WAITING, NULL, NULL, 0);
skip_futex_wait:

        /* Tell the waker thread that we are running. */
        uatomic_or(&wait->state, URCU_WAIT_RUNNING);

        /*
         * Wait until the waker thread lets us know it is ok to tear down
         * the memory allocated for struct urcu_wait_node.
         */
        for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
                if (uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN)
                        break;
                caa_cpu_relax();
        }
        while (!(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN))
                poll(NULL, 0, 10);
        assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
}

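/*
 * Waiter-side sketch (illustrative only; wait_for_event() is a
 * hypothetical caller function, not part of this header):
 *
 *	static void wait_for_event(struct urcu_wait_queue *queue)
 *	{
 *		DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
 *
 *		(void) urcu_wait_add(queue, &wait);
 *		urcu_adaptative_busy_wait(&wait);
 *		// urcu_adaptative_busy_wait() only returns once the
 *		// waker has set URCU_WAIT_TEARDOWN, so the
 *		// stack-allocated node may safely go out of scope here.
 *	}
 */
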
/*
 * Needs mutual exclusion against other wakeup and move-waiters
 * operations; it must be provided by the caller.
 */
static inline
int urcu_dequeue_wake_single(struct urcu_wait_queue *queue)
{
        struct cds_lfs_node *node;
        struct urcu_wait_node *wait_node;
        int ret = 0;

        node = __cds_lfs_pop(&queue->stack);
        if (!node)
                return -ENOENT;
        wait_node = caa_container_of(node, struct urcu_wait_node, node);
        CMM_STORE_SHARED(wait_node->in_waitqueue, false);
        /* Don't wake already running threads. */
        if (!(wait_node->state & URCU_WAIT_RUNNING))
                ret = urcu_adaptative_wake_up(wait_node);
        return ret;
}

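/*
 * Sketch of the caller-provided mutual exclusion (illustrative only;
 * wakeup_mutex and queue are hypothetical names, and <pthread.h> would
 * be needed by the caller):
 *
 *	pthread_mutex_lock(&wakeup_mutex);
 *	(void) urcu_dequeue_wake_single(&queue);
 *	pthread_mutex_unlock(&wakeup_mutex);
 */
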
/*
 * Dequeue and wake up to "n" waiters.
 * Needs mutual exclusion against other wakeup and move-waiters
 * operations; it must be provided by the caller.
 */
static inline
int urcu_dequeue_wake_n(struct urcu_wait_queue *queue, int n)
{
        int nr_wakeup = 0;

        while (n-- > 0) {
                int ret;

                ret = urcu_dequeue_wake_single(queue);
                if (ret < 0)
                        return nr_wakeup;
                else if (ret > 0)
                        nr_wakeup++;
                else
                        break;
        }
        return nr_wakeup;
}

static inline
int urcu_wake_all_waiters(struct urcu_waiters *waiters)
{
        struct cds_lfs_node *iter, *iter_n;
        int nr_wakeup = 0;

        /* Wake all waiters in our stack head. */
        cds_lfs_for_each_safe(waiters->head, iter, iter_n) {
                struct urcu_wait_node *wait_node =
                        caa_container_of(iter, struct urcu_wait_node, node);

                CMM_STORE_SHARED(wait_node->in_waitqueue, false);
                /* Don't wake already running threads. */
                if (wait_node->state & URCU_WAIT_RUNNING)
                        continue;
                if (urcu_adaptative_wake_up(wait_node))
                        nr_wakeup++;
        }
        return nr_wakeup;
}

#endif /* _URCU_WAITQUEUE_LIFO_H */