#ifndef _URCU_WAITQUEUE_LIFO_H
#define _URCU_WAITQUEUE_LIFO_H

/*
 * urcu/waitqueue-lifo.h
 *
 * Userspace RCU library - wait queue scheme with LIFO semantic
 *
 * Copyright (c) 2012-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <stdbool.h>
#include <stdint.h>

#include <urcu/uatomic.h>
#include <urcu/wfstack.h>
#include <urcu/futex.h>

/*
 * Number of busy-loop attempts before waiting on futex for grace period
 * batching.
 */
#define URCU_WAIT_ATTEMPTS 1000

enum urcu_wait_state {
	/* URCU_WAIT_WAITING is compared directly (futex compares it). */
	URCU_WAIT_WAITING = 0,
	/* Non-zero states are used as bit masks. */
	URCU_WAIT_WAKEUP = (1 << 0),
	URCU_WAIT_RUNNING = (1 << 1),
	URCU_WAIT_TEARDOWN = (1 << 2),
};
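
/*
 * Typical lifecycle of a wait node (illustrative summary derived from
 * the functions below):
 *
 *	waiter				waker
 *	------				-----
 *	set state to WAITING
 *	push node onto wait queue
 *	busy-wait, then futex wait
 *					or WAKEUP into state
 *					futex wake if not RUNNING
 *	or RUNNING into state
 *					or TEARDOWN into state
 *	reclaim node memory once
 *	TEARDOWN is observed
 */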

struct urcu_wait_node {
	struct cds_wfs_node node;
	int32_t state;	/* enum urcu_wait_state */
};

#define URCU_WAIT_NODE_INIT(name, _state)	\
	{ .state = _state }

#define DEFINE_URCU_WAIT_NODE(name, state)	\
	struct urcu_wait_node name = URCU_WAIT_NODE_INIT(name, state)

#define DECLARE_URCU_WAIT_NODE(name)		\
	struct urcu_wait_node name

struct urcu_wait_queue {
	struct __cds_wfs_stack stack;
};

#define URCU_WAIT_QUEUE_HEAD_INIT(name)		\
	{ .stack.head = CDS_WFS_END, }

#define DECLARE_URCU_WAIT_QUEUE(name)		\
	struct urcu_wait_queue name

#define DEFINE_URCU_WAIT_QUEUE(name)		\
	struct urcu_wait_queue name = URCU_WAIT_QUEUE_HEAD_INIT(name)

static inline
void urcu_wait_queue_init(struct urcu_wait_queue *queue)
{
	__cds_wfs_init(&queue->stack);
}

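/*
 * Example (illustrative only): a wait queue can either be defined
 * statically:
 *
 *	static DEFINE_URCU_WAIT_QUEUE(my_queue);
 *
 * or initialized at run time, e.g. for dynamically allocated memory:
 *
 *	struct urcu_wait_queue *queue = malloc(sizeof(*queue));
 *
 *	if (queue)
 *		urcu_wait_queue_init(queue);
 */
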
struct urcu_waiters {
	struct cds_wfs_head *head;
};

/*
 * Atomically add ourself to a wait queue. Return false if the queue was
 * previously empty, true otherwise.
 * A full memory barrier is issued before the node is added to the wait
 * queue.
 */
static inline
bool urcu_wait_add(struct urcu_wait_queue *queue,
		struct urcu_wait_node *node)
{
	return cds_wfs_push(&queue->stack, &node->node);
}

/*
 * Atomically move all waiters from wait queue into our local struct
 * urcu_waiters.
 */
static inline
void urcu_move_waiters(struct urcu_waiters *waiters,
		struct urcu_wait_queue *queue)
{
	waiters->head = __cds_wfs_pop_all(&queue->stack);
}

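/*
 * Once moved, the snapshot in struct urcu_waiters is private to the
 * calling thread, so the waiters can be woken up without holding the
 * lock that serializes wakeup and move-waiters operations (see the
 * example after urcu_wake_all_waiters below).
 */
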
static inline
void urcu_wait_set_state(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	node->state = state;
}

static inline
void urcu_wait_or_state(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	uatomic_or(&node->state, state);
}

static inline
void urcu_wait_node_init(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	urcu_wait_set_state(node, state);
	cds_wfs_node_init(&node->node);
}

/*
 * Note: urcu_adaptative_wake_up needs the "wait" node to stay allocated
 * throughout its execution. In this scheme, the waiter owns the node
 * memory, and we only allow it to free this memory when it receives the
 * URCU_WAIT_TEARDOWN flag.
 */
static inline
void urcu_adaptative_wake_up(struct urcu_wait_node *wait)
{
	cmm_smp_mb();
	/*
	 * "or" of the WAKEUP flag rather than "set" is useful for
	 * multiple concurrent wakeup sources. Note that "WAIT_TEARDOWN"
	 * becomes useless when we use multiple wakeup sources: the
	 * lifetime of the wait node should then be handled by the
	 * caller.
	 */
	uatomic_or(&wait->state, URCU_WAIT_WAKEUP);
	if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING))
		futex_noasync(&wait->state, FUTEX_WAKE, 1, NULL, NULL, 0);
	/* Allow teardown of struct urcu_wait_node memory. */
	uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
}

/*
 * Caller must initialize the wait node state to URCU_WAIT_WAITING
 * before passing its memory to the waker thread.
 */
static inline
void urcu_adaptative_busy_wait(struct urcu_wait_node *wait)
{
	unsigned int i;

	/* Load and test condition before reading the state. */
	cmm_smp_rmb();
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) != URCU_WAIT_WAITING)
			goto skip_futex_wait;
		caa_cpu_relax();
	}
	futex_noasync(&wait->state, FUTEX_WAIT,
		URCU_WAIT_WAITING, NULL, NULL, 0);
skip_futex_wait:

	/* Tell the waker thread that we are running. */
	uatomic_or(&wait->state, URCU_WAIT_RUNNING);

	/*
	 * Wait until the waker thread lets us know it's ok to tear down
	 * the memory allocated for struct urcu_wait_node.
	 */
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN)
			break;
		caa_cpu_relax();
	}
	while (!(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN))
		poll(NULL, 0, 10);
	assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
}

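/*
 * Example (illustrative sketch): a waiter typically pairs urcu_wait_add
 * with urcu_adaptative_busy_wait. "my_queue" is a hypothetical queue
 * defined with DEFINE_URCU_WAIT_QUEUE():
 *
 *	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
 *
 *	(void) urcu_wait_add(&my_queue, &wait);
 *	urcu_adaptative_busy_wait(&wait);
 *
 * When urcu_adaptative_busy_wait returns, the waker has set
 * URCU_WAIT_TEARDOWN, so the stack memory backing "wait" may safely go
 * out of scope.
 */
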
/*
 * Mutual exclusion against other wakeup and move-waiters operations
 * must be provided by the caller.
 */
static inline
int urcu_dequeue_wake_single(struct urcu_wait_queue *queue)
{
	struct cds_wfs_node *node;
	struct urcu_wait_node *wait_node;
	int wakeup_done = 0;

	node = __cds_wfs_pop_blocking(&queue->stack);
	if (!node)
		return -ENOENT;
	wait_node = caa_container_of(node, struct urcu_wait_node, node);
	CMM_STORE_SHARED(wait_node->node.next, NULL);
	/* Don't wake already running threads. */
	if (!(wait_node->state & URCU_WAIT_RUNNING)) {
		urcu_adaptative_wake_up(wait_node);
		wakeup_done = 1;
	}
	return wakeup_done;
}

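/*
 * Example (illustrative sketch, assuming a hypothetical pthread mutex
 * "queue_lock" providing the mutual exclusion required above):
 *
 *	pthread_mutex_lock(&queue_lock);
 *	ret = urcu_dequeue_wake_single(&my_queue);
 *	pthread_mutex_unlock(&queue_lock);
 *
 * ret is -ENOENT if the queue was empty, 1 if a waiter was woken up,
 * and 0 if the dequeued waiter was already running.
 */
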
/*
 * Wake up to "n" waiters. Mutual exclusion against other wakeup and
 * move-waiters operations must be provided by the caller.
 */
static inline
int urcu_dequeue_wake_n(struct urcu_wait_queue *queue, int n)
{
	int nr_wakeup = 0;

	while (nr_wakeup < n) {
		int ret;

		ret = urcu_dequeue_wake_single(queue);
		if (ret < 0)
			break;	/* Queue is empty. */
		else if (ret > 0)
			nr_wakeup++;
		else
			break;	/* Dequeued waiter was already running. */
	}
	return nr_wakeup;
}

static inline
int urcu_wake_all_waiters(struct urcu_waiters *waiters)
{
	struct cds_wfs_node *iter, *iter_n;
	int nr_wakeup = 0;

	/* Wake all waiters in our stack head. */
	cds_wfs_for_each_blocking_safe(waiters->head, iter, iter_n) {
		struct urcu_wait_node *wait_node =
			caa_container_of(iter, struct urcu_wait_node, node);

		CMM_STORE_SHARED(wait_node->node.next, NULL);
		/* Don't wake already running threads. */
		if (wait_node->state & URCU_WAIT_RUNNING)
			continue;
		urcu_adaptative_wake_up(wait_node);
		nr_wakeup++;
	}
	return nr_wakeup;
}

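/*
 * Example (illustrative sketch): a waker combining urcu_move_waiters
 * and urcu_wake_all_waiters to broadcast a wakeup. "queue_lock" and
 * "my_queue" are hypothetical:
 *
 *	struct urcu_waiters waiters;
 *	int woken;
 *
 *	pthread_mutex_lock(&queue_lock);
 *	urcu_move_waiters(&waiters, &my_queue);
 *	pthread_mutex_unlock(&queue_lock);
 *	woken = urcu_wake_all_waiters(&waiters);
 *
 * Waking outside the lock is safe because the snapshot in "waiters" is
 * private to this thread.
 */
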
#endif /* _URCU_WAITQUEUE_LIFO_H */