workqueue: fix uninitialized mutex
[userspace-rcu.git] / urcu / workqueue-fifo.h

#ifndef _URCU_WORKQUEUE_FIFO_H
#define _URCU_WORKQUEUE_FIFO_H

/*
 * urcu/workqueue-fifo.h
 *
 * Userspace RCU library - work queue scheme with FIFO semantic
 *
 * Copyright (c) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/uatomic.h>
#include <urcu/lfstack.h>
#include <urcu/waitqueue-lifo.h>
#include <urcu/wfcqueue.h>
#include <urcu/rculist.h>
#include <pthread.h>
#include <assert.h>

enum urcu_accept_ret {
	URCU_ACCEPT_WORK = 0,
	URCU_ACCEPT_SHUTDOWN = 1,
};

/*
 * We use RCU to steal work from siblings. Therefore, one of the RCU
 * flavors needs to be included before this header. All workers that
 * participate in stealing (initialized with the URCU_WORKER_STEAL flag)
 * need to be registered RCU reader threads.
 */

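/*
 * Minimal setup sketch (the flavor and function names below are
 * illustrative, not mandated by this header): include an RCU flavor
 * before this header, and register each stealing worker thread as an
 * RCU reader.
 *
 *	#include <urcu.h>
 *	#include <urcu/workqueue-fifo.h>
 *
 *	static void *worker_thread_example(void *arg)
 *	{
 *		rcu_register_thread();
 *		// ... run the worker loop (see urcu_accept_work) ...
 *		rcu_unregister_thread();
 *		return NULL;
 *	}
 */
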
struct urcu_work {
	struct cds_wfcq_node node;
};

struct urcu_workqueue {
	/* FIFO work queue */
	struct __cds_wfcq_head head;
	struct cds_wfcq_tail tail;

	/* Associated wait queue for LIFO wait/wakeup */
	struct urcu_wait_queue waitqueue;

	/* RCU linked list head of siblings for work stealing. */
	struct cds_list_head sibling_head;
	pthread_mutex_t sibling_lock;	/* Protect sibling list updates */

	bool shutdown;			/* Shutdown performed */
};

struct urcu_worker {
	/* Workqueue which can be either used by worker, or stolen. */
	struct cds_wfcq_head head;
	struct cds_wfcq_tail tail;

	/* Work belonging to worker. Cannot be stolen. */
	struct urcu_work *own;

	struct urcu_wait_node wait_node;
	/* RCU linked list node of siblings for work stealing. */
	struct cds_list_head sibling_node;
	int flags;	/* enum urcu_worker_flags */
};

enum urcu_worker_flags {
	URCU_WORKER_STEAL = (1 << 0),
};

static inline
void urcu_workqueue_init(struct urcu_workqueue *queue)
{
	__cds_wfcq_init(&queue->head, &queue->tail);
	urcu_wait_queue_init(&queue->waitqueue);
	CDS_INIT_LIST_HEAD(&queue->sibling_head);
	pthread_mutex_init(&queue->sibling_lock, NULL);
	queue->shutdown = false;
}

static inline
void urcu_queue_work(struct urcu_workqueue *queue, struct urcu_work *work)
{
	bool was_empty;

	cds_wfcq_node_init(&work->node);

	/* Enqueue work. */
	was_empty = !cds_wfcq_enqueue(&queue->head, &queue->tail,
			&work->node);
	/*
	 * If the workqueue was previously empty, wake up one worker
	 * thread. It will eventually grab the entire content of the
	 * work-queue (therefore grabbing a "work batch"). After having
	 * grabbed the work batch, while that thread is running and
	 * taking care of that work batch, when we enqueue more work, we
	 * will wake another thread (if there is one waiting), which
	 * will eventually grab the new batch, and so on. This scheme
	 * ensures that contiguous batches of work are handled by the
	 * same thread (for locality), and also ensures that we scale
	 * work to many worker threads when threads are busy enough to
	 * still be running when work is enqueued.
	 */
	if (was_empty) {
		rcu_read_lock();	/* Protect stack dequeue */
		(void) urcu_dequeue_wake_single(&queue->waitqueue);
		rcu_read_unlock();	/* Protect stack dequeue */
	}
}

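/*
 * Usage sketch: urcu_work is meant to be embedded into the caller's
 * own work item, recovered with caa_container_of() after dequeue. The
 * "my_task" type is illustrative only.
 *
 *	struct my_task {
 *		int payload;
 *		struct urcu_work work;
 *	};
 *
 *	static void submit_task(struct urcu_workqueue *q, int payload)
 *	{
 *		struct my_task *t = malloc(sizeof(*t));
 *
 *		if (!t)
 *			abort();
 *		t->payload = payload;
 *		urcu_queue_work(q, &t->work);
 *	}
 */
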
static inline
void __urcu_workqueue_wakeup_all(struct urcu_workqueue *queue)
{
	struct urcu_waiters waiters;

	rcu_read_lock();	/* Protect stack dequeue */
	urcu_move_waiters(&waiters, &queue->waitqueue);
	rcu_read_unlock();	/* Protect stack dequeue */

	(void) urcu_wake_all_waiters(&waiters);
}

static inline
void urcu_worker_init(struct urcu_worker *worker, int flags)
{
	cds_wfcq_init(&worker->head, &worker->tail);
	worker->flags = flags;
	urcu_wait_node_init(&worker->wait_node, URCU_WAIT_RUNNING);
	worker->own = NULL;
}

static inline
void urcu_worker_register(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	if (worker->flags & URCU_WORKER_STEAL) {
		pthread_mutex_lock(&queue->sibling_lock);
		cds_list_add_rcu(&worker->sibling_node, &queue->sibling_head);
		pthread_mutex_unlock(&queue->sibling_lock);
	}
}

static inline
void urcu_worker_unregister(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	enum cds_wfcq_ret wfcq_ret;

	if (worker->flags & URCU_WORKER_STEAL) {
		pthread_mutex_lock(&queue->sibling_lock);
		cds_list_del_rcu(&worker->sibling_node);
		pthread_mutex_unlock(&queue->sibling_lock);
	}

	/*
	 * Make sure we are removed from the waitqueue.
	 */
	if (CMM_LOAD_SHARED(worker->wait_node.node.next))
		__urcu_workqueue_wakeup_all(queue);

	/*
	 * Put any local work we still have back into the workqueue.
	 */
	wfcq_ret = __cds_wfcq_splice_blocking(&queue->head,
			&queue->tail,
			&worker->head,
			&worker->tail);
	if (wfcq_ret == CDS_WFCQ_RET_DEST_EMPTY) {
		/*
		 * We spliced work into a previously empty workqueue
		 * (which implies the source was non-empty), so wake up
		 * a worker thread to handle it.
		 */
		rcu_read_lock();	/* Protect stack dequeue */
		(void) urcu_dequeue_wake_single(&queue->waitqueue);
		rcu_read_unlock();	/* Protect stack dequeue */
	}

	/*
	 * Wait for a grace period before freeing or reusing "worker",
	 * because it is used by the RCU linked list. This also prevents
	 * ABA on the waitqueue stack dequeue: it matches the RCU
	 * read-side critical sections around the dequeue and move-all
	 * operations on the waitqueue.
	 */
	synchronize_rcu();
}

static inline
bool ___urcu_grab_work(struct urcu_worker *worker,
		cds_wfcq_head_ptr_t src_head,
		struct cds_wfcq_tail *src_tail,
		bool steal)
{
	enum cds_wfcq_ret splice_ret;
	struct __cds_wfcq_head tmp_head;
	struct cds_wfcq_tail tmp_tail;
	struct cds_wfcq_node *node;

	/*
	 * Don't bother grabbing the src queue lock if it is empty.
	 */
	if (cds_wfcq_empty(src_head, src_tail))
		return false;
	__cds_wfcq_init(&tmp_head, &tmp_tail);

	/* Ensure that we preserve FIFO work order. */
	assert(!steal || worker->own == NULL);

	/* Splice to temporary queue. */
	if (steal)
		cds_wfcq_dequeue_lock(src_head.h, src_tail);
	splice_ret = __cds_wfcq_splice_blocking(&tmp_head,
			&tmp_tail,
			src_head,
			src_tail);
	if (steal)
		cds_wfcq_dequeue_unlock(src_head.h, src_tail);
	if (splice_ret == CDS_WFCQ_RET_SRC_EMPTY)
		return false;

	/*
	 * Keep one work entry for ourself. This ensures forward
	 * progress amongst stealing co-workers. This also ensures that
	 * when a worker grabs some work from the global workqueue, it
	 * will have at least one work item to deal with.
	 */
	if (worker->own == NULL) {
		if (!steal) {
			/*
			 * Try to grab our own work from the worker
			 * workqueue to preserve FIFO order.
			 */
			node = cds_wfcq_dequeue_blocking(&worker->head,
					&worker->tail);
			if (node)
				goto got_node;
		}
		node = __cds_wfcq_dequeue_blocking(&tmp_head, &tmp_tail);
		assert(node != NULL);
got_node:
		worker->own = caa_container_of(node, struct urcu_work, node);
	}

	/* Splice into worker workqueue. */
	splice_ret = __cds_wfcq_splice_blocking(&worker->head,
			&worker->tail,
			&tmp_head,
			&tmp_tail);
	/* Ensure that we preserve FIFO work order. */
	assert(!steal || splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
	return true;
}

/*
 * Try stealing work from siblings when we have nothing to do.
 */
static inline
bool ___urcu_steal_work(struct urcu_worker *worker,
		struct urcu_worker *sibling)
{
	return ___urcu_grab_work(worker, &sibling->head, &sibling->tail,
			true);
}

static inline
bool __urcu_steal_work(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	struct urcu_worker *sibling_prev, *sibling_next;
	struct cds_list_head *sibling_node;
	bool steal_performed = false;

	if (!(worker->flags & URCU_WORKER_STEAL))
		return false;

	rcu_read_lock();

	sibling_node = rcu_dereference(worker->sibling_node.next);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->next);
	sibling_next = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_next != worker)
		steal_performed = ___urcu_steal_work(worker, sibling_next);
	if (steal_performed)
		goto end;

	sibling_node = rcu_dereference(worker->sibling_node.prev);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->prev);
	sibling_prev = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_prev != worker && sibling_prev != sibling_next)
		steal_performed = ___urcu_steal_work(worker, sibling_prev);
end:
	rcu_read_unlock();

	return steal_performed;
}

static inline
bool ___urcu_wakeup_sibling(struct urcu_worker *sibling)
{
	return urcu_adaptative_wake_up(&sibling->wait_node);
}

static inline
bool __urcu_wakeup_siblings(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	struct urcu_worker *sibling_prev, *sibling_next;
	struct cds_list_head *sibling_node;
	bool wakeup_performed = false;

	if (!(worker->flags & URCU_WORKER_STEAL))
		return false;

	/* Only wake up siblings if we have work in our own queue. */
	if (cds_wfcq_empty(&worker->head, &worker->tail))
		return false;

	rcu_read_lock();

	sibling_node = rcu_dereference(worker->sibling_node.next);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->next);
	sibling_next = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_next != worker)
		wakeup_performed = ___urcu_wakeup_sibling(sibling_next);
	if (wakeup_performed)
		goto end;

	sibling_node = rcu_dereference(worker->sibling_node.prev);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->prev);
	sibling_prev = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_prev != worker && sibling_prev != sibling_next)
		wakeup_performed = ___urcu_wakeup_sibling(sibling_prev);
end:
	rcu_read_unlock();

	return wakeup_performed;
}

static inline
enum urcu_accept_ret urcu_accept_work(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	bool has_work;

	has_work = ___urcu_grab_work(worker, &queue->head, &queue->tail,
			false);
	/* Don't wait if we have work to do. */
	if (has_work || !cds_wfcq_empty(&worker->head, &worker->tail))
		goto do_work;
	/* Try to steal work from siblings instead of blocking. */
	if (__urcu_steal_work(queue, worker))
		goto do_work;
	/* No more work to do, check shutdown state. */
	if (CMM_LOAD_SHARED(queue->shutdown))
		return URCU_ACCEPT_SHUTDOWN;
	urcu_wait_set_state(&worker->wait_node,
			URCU_WAIT_WAITING);
	if (!CMM_LOAD_SHARED(worker->wait_node.node.next)) {
		int was_empty;

		/*
		 * NULL next pointer. We are therefore not in
		 * the queue.
		 */
		cds_lfs_node_init(&worker->wait_node.node);
		/* Protect stack dequeue against ABA. */
		synchronize_rcu();
		was_empty = !urcu_wait_add(&queue->waitqueue,
				&worker->wait_node);
		/*
		 * If the wait queue was empty, it means we are the
		 * first thread to be put back into an otherwise empty
		 * wait queue. Re-check if the work queue is empty after
		 * adding ourself to the wait queue, so we can wake up
		 * the top of the wait queue since new work has
		 * appeared, and the work enqueuer may not have seen
		 * that it needed to do a wakeup.
		 */
		if (was_empty && !cds_wfcq_empty(&queue->head,
				&queue->tail)) {
			rcu_read_lock();	/* Protect stack dequeue */
			(void) urcu_dequeue_wake_single(&queue->waitqueue);
			rcu_read_unlock();	/* Protect stack dequeue */
		}
	} else {
		/*
		 * Non-NULL next pointer. We are therefore in
		 * the queue, or the dispatcher just removed us
		 * from it (after we read the next pointer), and
		 * is therefore awakening us. The state will
		 * therefore have been changed from WAITING to
		 * some other state, which will let the busy
		 * wait pass through.
		 */
	}
	urcu_adaptative_busy_wait(&worker->wait_node);
	/*
	 * We were awakened: report work acceptance, and let the caller
	 * grab work (or observe shutdown) on its next invocation.
	 */
	return URCU_ACCEPT_WORK;

do_work:
	/*
	 * We will be busy handling the work batch, awaken siblings so
	 * they can steal from us.
	 */
	(void) __urcu_wakeup_siblings(queue, worker);
	return URCU_ACCEPT_WORK;
}

static inline
struct urcu_work *urcu_dequeue_work(struct urcu_worker *worker)
{
	struct cds_wfcq_node *node;

	if (worker->own) {
		struct urcu_work *work;

		/* Process our own work entry. */
		work = worker->own;
		worker->own = NULL;
		return work;
	}
	/*
	 * If we are registered for work stealing, we need to dequeue
	 * safely against siblings.
	 */
	if (worker->flags & URCU_WORKER_STEAL) {
		/*
		 * Don't bother grabbing the worker queue lock if it is
		 * empty.
		 */
		if (cds_wfcq_empty(&worker->head, &worker->tail))
			return NULL;
		node = cds_wfcq_dequeue_blocking(&worker->head,
				&worker->tail);
	} else {
		node = ___cds_wfcq_dequeue_with_state(&worker->head,
				&worker->tail, NULL, 1, 0);
	}
	if (!node)
		return NULL;
	return caa_container_of(node, struct urcu_work, node);
}

static inline
void urcu_workqueue_shutdown(struct urcu_workqueue *queue)
{
	/* Set shutdown. */
	CMM_STORE_SHARED(queue->shutdown, true);
	/* Wake up all workers. */
	__urcu_workqueue_wakeup_all(queue);
}

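/*
 * A worker thread main loop could look as follows (sketch; the
 * handle_task() callback and "my_task" type are illustrative, as in
 * the submission example above):
 *
 *	static void *worker_fn(void *arg)
 *	{
 *		struct urcu_workqueue *queue = arg;
 *		struct urcu_worker worker;
 *
 *		rcu_register_thread();
 *		urcu_worker_init(&worker, URCU_WORKER_STEAL);
 *		urcu_worker_register(queue, &worker);
 *
 *		while (urcu_accept_work(queue, &worker)
 *				!= URCU_ACCEPT_SHUTDOWN) {
 *			struct urcu_work *work;
 *
 *			while ((work = urcu_dequeue_work(&worker)) != NULL)
 *				handle_task(caa_container_of(work,
 *						struct my_task, work));
 *		}
 *
 *		urcu_worker_unregister(queue, &worker);
 *		rcu_unregister_thread();
 *		return NULL;
 *	}
 */
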
#endif /* _URCU_WORKQUEUE_FIFO_H */