workqueue: consider "own" item for do_work criterion
#ifndef _URCU_WORKQUEUE_FIFO_H
#define _URCU_WORKQUEUE_FIFO_H

/*
 * urcu/workqueue-fifo.h
 *
 * Userspace RCU library - work queue scheme with FIFO semantic
 *
 * Copyright (c) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/uatomic.h>
#include <urcu/lfstack.h>
#include <urcu/waitqueue-lifo.h>
#include <urcu/wfcqueue.h>
#include <urcu/rculist.h>
#include <pthread.h>
#include <assert.h>

enum urcu_accept_ret {
	URCU_ACCEPT_WORK	= 0,
	URCU_ACCEPT_SHUTDOWN	= 1,
};

/*
 * We use RCU to steal work from siblings. Therefore, one of the RCU
 * flavors needs to be included before this header. All workers that
 * participate in stealing (initialized with the URCU_WORKER_STEAL
 * flag) need to be registered RCU reader threads.
 */
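
/*
 * For example (a minimal sketch, not part of this API; any RCU flavor
 * works, the default flavor header is shown), an including translation
 * unit would do:
 *
 *	#include <urcu.h>		(RCU flavor, included first)
 *	#include <urcu/workqueue-fifo.h>
 *
 * Stealing workers must additionally be registered as RCU readers,
 * e.g. with rcu_register_thread().
 */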

struct urcu_work {
	struct cds_wfcq_node node;
};

struct urcu_workqueue {
	/* FIFO work queue */
	struct __cds_wfcq_head head;
	struct cds_wfcq_tail tail;

	/* Associated wait queue for LIFO wait/wakeup */
	struct urcu_wait_queue waitqueue;

	/* RCU linked list head of siblings for work stealing. */
	struct cds_list_head sibling_head;
	pthread_mutex_t sibling_lock;	/* Protect sibling list updates */

	bool shutdown;			/* Shutdown performed */
};

struct urcu_worker {
	/* Workqueue which can either be used by the worker, or stolen. */
	struct cds_wfcq_head head;
	struct cds_wfcq_tail tail;

	/* Work belonging to the worker. Cannot be stolen. */
	struct urcu_work *own;

	struct urcu_wait_node wait_node;
	/* RCU linked list node of siblings for work stealing. */
	struct cds_list_head sibling_node;
	int flags;	/* enum urcu_worker_flags */
};

enum urcu_worker_flags {
	URCU_WORKER_STEAL	= (1 << 0),
};

static inline
void urcu_workqueue_init(struct urcu_workqueue *queue)
{
	__cds_wfcq_init(&queue->head, &queue->tail);
	urcu_wait_queue_init(&queue->waitqueue);
	CDS_INIT_LIST_HEAD(&queue->sibling_head);
	pthread_mutex_init(&queue->sibling_lock, NULL);
	queue->shutdown = false;
}
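
/*
 * Example (sketch): the workqueue is typically a long-lived object,
 * initialized once before any worker or dispatcher thread uses it:
 *
 *	static struct urcu_workqueue queue;
 *
 *	urcu_workqueue_init(&queue);
 */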

static inline
void urcu_queue_work(struct urcu_workqueue *queue, struct urcu_work *work)
{
	bool was_empty;

	cds_wfcq_node_init(&work->node);

	/* Enqueue work. */
	was_empty = !cds_wfcq_enqueue(&queue->head, &queue->tail,
			&work->node);
	/*
	 * If the workqueue was previously empty, wake up one worker
	 * thread. It will eventually grab the entire content of the
	 * work queue (therefore grabbing a "work batch"). While that
	 * thread is running and taking care of its work batch,
	 * enqueuing more work wakes up another thread (if one is
	 * waiting), which will eventually grab the new batch, and so
	 * on. This scheme ensures that contiguous batches of work are
	 * handled by the same thread (for locality), and also ensures
	 * that work scales to many worker threads when threads are
	 * busy enough to still be running when work is enqueued.
	 */
	if (was_empty) {
		rcu_read_lock();	/* Protect stack dequeue */
		(void) urcu_dequeue_wake_single(&queue->waitqueue);
		rcu_read_unlock();	/* Protect stack dequeue */
	}
}
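
/*
 * Example (sketch; "struct my_work" and its payload are hypothetical):
 * a dispatcher embeds struct urcu_work in its own work item and hands
 * it off; ownership passes to whichever worker dequeues it.
 *
 *	struct my_work {
 *		struct urcu_work w;
 *		int payload;
 *	};
 *
 *	struct my_work *mw = malloc(sizeof(*mw));
 *
 *	mw->payload = 42;
 *	urcu_queue_work(&queue, &mw->w);
 */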

static inline
void __urcu_workqueue_wakeup_all(struct urcu_workqueue *queue)
{
	struct urcu_waiters waiters;

	rcu_read_lock();	/* Protect stack dequeue */
	urcu_move_waiters(&waiters, &queue->waitqueue);
	rcu_read_unlock();	/* Protect stack dequeue */

	(void) urcu_wake_all_waiters(&waiters);
}

static inline
void urcu_worker_init(struct urcu_worker *worker, int flags)
{
	cds_wfcq_init(&worker->head, &worker->tail);
	worker->flags = flags;
	urcu_wait_node_init(&worker->wait_node, URCU_WAIT_RUNNING);
	worker->own = NULL;
	worker->wait_node.node.next = NULL;
}

static inline
void urcu_worker_register(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	if (worker->flags & URCU_WORKER_STEAL) {
		pthread_mutex_lock(&queue->sibling_lock);
		cds_list_add_rcu(&worker->sibling_node, &queue->sibling_head);
		pthread_mutex_unlock(&queue->sibling_lock);
	}
}

static inline
void urcu_worker_unregister(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	enum cds_wfcq_ret wfcq_ret;

	if (worker->flags & URCU_WORKER_STEAL) {
		pthread_mutex_lock(&queue->sibling_lock);
		cds_list_del_rcu(&worker->sibling_node);
		pthread_mutex_unlock(&queue->sibling_lock);
	}

	/*
	 * Make sure we are removed from the waitqueue.
	 */
	if (CMM_LOAD_SHARED(worker->wait_node.node.next))
		__urcu_workqueue_wakeup_all(queue);

	/*
	 * Put any local work we still have back into the workqueue.
	 */
	wfcq_ret = __cds_wfcq_splice_blocking(&queue->head,
			&queue->tail,
			&worker->head,
			&worker->tail);
	if (wfcq_ret != CDS_WFCQ_RET_SRC_EMPTY
			&& wfcq_ret == CDS_WFCQ_RET_DEST_EMPTY) {
		/*
		 * Wake up a worker thread if we have put work back
		 * into a workqueue that was previously empty.
		 */
		rcu_read_lock();	/* Protect stack dequeue */
		(void) urcu_dequeue_wake_single(&queue->waitqueue);
		rcu_read_unlock();	/* Protect stack dequeue */
	}

	/*
	 * Wait for a grace period before freeing or reusing "worker",
	 * because it is used by an RCU linked list. This also prevents
	 * ABA on the waitqueue stack dequeue: it matches the RCU
	 * read-side critical sections around the dequeue and move-all
	 * operations on the waitqueue.
	 */
	synchronize_rcu();
}

static inline
bool ___urcu_grab_work(struct urcu_worker *worker,
		cds_wfcq_head_ptr_t src_head,
		struct cds_wfcq_tail *src_tail,
		bool steal)
{
	enum cds_wfcq_ret splice_ret;
	struct __cds_wfcq_head tmp_head;
	struct cds_wfcq_tail tmp_tail;
	struct cds_wfcq_node *node;

	/*
	 * Don't bother grabbing the src queue lock if it is empty.
	 */
	if (cds_wfcq_empty(src_head, src_tail))
		return false;
	__cds_wfcq_init(&tmp_head, &tmp_tail);

	/* Ensure that we preserve FIFO work order. */
	assert(!steal || worker->own == NULL);

	/* Splice to temporary queue. */
	if (steal)
		cds_wfcq_dequeue_lock(src_head.h, src_tail);
	splice_ret = __cds_wfcq_splice_blocking(&tmp_head,
			&tmp_tail,
			src_head,
			src_tail);
	if (steal)
		cds_wfcq_dequeue_unlock(src_head.h, src_tail);
	if (splice_ret == CDS_WFCQ_RET_SRC_EMPTY)
		return false;

	/*
	 * Keep one work entry for ourselves. This ensures forward
	 * progress amongst stealing co-workers. This also ensures that
	 * when a worker grabs some work from the global workqueue, it
	 * will have at least one work item to deal with.
	 */
	if (worker->own == NULL) {
		if (!steal) {
			/*
			 * Try to grab own work from the worker
			 * workqueue to preserve FIFO order.
			 */
			node = cds_wfcq_dequeue_blocking(&worker->head,
					&worker->tail);
			if (node)
				goto got_node;
		}
		node = __cds_wfcq_dequeue_blocking(&tmp_head, &tmp_tail);
		assert(node != NULL);
got_node:
		worker->own = caa_container_of(node, struct urcu_work, node);
	}

	/* Splice into worker workqueue. */
	splice_ret = __cds_wfcq_splice_blocking(&worker->head,
			&worker->tail,
			&tmp_head,
			&tmp_tail);
	/* Ensure that we preserve FIFO work order. */
	assert(!steal || splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
	return true;
}

/*
 * Try stealing work from siblings when we have nothing to do.
 */
static inline
bool ___urcu_steal_work(struct urcu_worker *worker,
		struct urcu_worker *sibling)
{
	return ___urcu_grab_work(worker, &sibling->head, &sibling->tail,
			true);
}

static inline
bool __urcu_steal_work(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	struct urcu_worker *sibling_prev, *sibling_next;
	struct cds_list_head *sibling_node;
	bool steal_performed = false;

	if (!(worker->flags & URCU_WORKER_STEAL))
		return false;

	rcu_read_lock();

	sibling_node = rcu_dereference(worker->sibling_node.next);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->next);
	sibling_next = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_next != worker)
		steal_performed = ___urcu_steal_work(worker, sibling_next);
	if (steal_performed)
		goto end;

	sibling_node = rcu_dereference(worker->sibling_node.prev);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->prev);
	sibling_prev = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_prev != worker && sibling_prev != sibling_next)
		steal_performed = ___urcu_steal_work(worker, sibling_prev);
end:
	rcu_read_unlock();

	return steal_performed;
}

static inline
bool ___urcu_wakeup_sibling(struct urcu_worker *sibling)
{
	return urcu_adaptative_wake_up(&sibling->wait_node);
}

static inline
bool __urcu_wakeup_siblings(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	struct urcu_worker *sibling_prev, *sibling_next;
	struct cds_list_head *sibling_node;
	bool wakeup_performed = false;

	if (!(worker->flags & URCU_WORKER_STEAL))
		return false;

	/* Only wake up siblings if we have work in our own queue. */
	if (cds_wfcq_empty(&worker->head, &worker->tail))
		return false;

	rcu_read_lock();

	sibling_node = rcu_dereference(worker->sibling_node.next);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->next);
	sibling_next = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_next != worker)
		wakeup_performed = ___urcu_wakeup_sibling(sibling_next);
	if (wakeup_performed)
		goto end;

	sibling_node = rcu_dereference(worker->sibling_node.prev);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->prev);
	sibling_prev = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_prev != worker && sibling_prev != sibling_next)
		wakeup_performed = ___urcu_wakeup_sibling(sibling_prev);
end:
	rcu_read_unlock();

	return wakeup_performed;
}

static inline
enum urcu_accept_ret urcu_accept_work(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	bool has_work;

	has_work = ___urcu_grab_work(worker, &queue->head, &queue->tail,
			false);
	/* Don't wait if we have work to do. */
	if (has_work || worker->own
			|| !cds_wfcq_empty(&worker->head, &worker->tail))
		goto do_work;
	/* Try to steal work from siblings instead of blocking. */
	if (__urcu_steal_work(queue, worker))
		goto do_work;
	/* No more work to do, check shutdown state. */
	if (CMM_LOAD_SHARED(queue->shutdown))
		return URCU_ACCEPT_SHUTDOWN;
	urcu_wait_set_state(&worker->wait_node,
			URCU_WAIT_WAITING);
	if (!CMM_LOAD_SHARED(worker->wait_node.node.next)) {
		int was_empty;

		/*
		 * NULL next pointer. We are therefore not in
		 * the queue.
		 */
		cds_lfs_node_init(&worker->wait_node.node);
		/* Protect stack dequeue against ABA. */
		synchronize_rcu();
		was_empty = !urcu_wait_add(&queue->waitqueue,
				&worker->wait_node);
		/*
		 * If the wait queue was empty, we are the first thread
		 * to be put back into an otherwise empty wait queue.
		 * Re-check whether the work queue is empty after adding
		 * ourselves to the wait queue, so we can wake up the
		 * top of the wait queue if new work has appeared: the
		 * work enqueuer may not have seen that it needed to do
		 * a wakeup.
		 */
		if (was_empty && !cds_wfcq_empty(&queue->head,
				&queue->tail)) {
			rcu_read_lock();	/* Protect stack dequeue */
			(void) urcu_dequeue_wake_single(&queue->waitqueue);
			rcu_read_unlock();	/* Protect stack dequeue */
		}
	} else {
		/*
		 * Non-NULL next pointer. We are therefore in the queue,
		 * or the dispatcher just removed us from it (after we
		 * read the next pointer), and is therefore awakening
		 * us. The state will therefore have been changed from
		 * WAITING to some other state, which will let the busy
		 * wait pass through.
		 */
	}
	urcu_adaptative_busy_wait(&worker->wait_node);
	/*
	 * We have been woken up: report that work may be available.
	 * Callers are expected to invoke urcu_accept_work() in a loop,
	 * so a spurious wakeup simply loops back to the work-grabbing
	 * path above.
	 */
	return URCU_ACCEPT_WORK;

do_work:
	/*
	 * We will be busy handling the work batch, awaken siblings so
	 * they can steal from us.
	 */
	(void) __urcu_wakeup_siblings(queue, worker);
	return URCU_ACCEPT_WORK;
}

static inline
struct urcu_work *urcu_dequeue_work(struct urcu_worker *worker)
{
	struct cds_wfcq_node *node;

	if (worker->own) {
		struct urcu_work *work;

		/* Process our own work entry. */
		work = worker->own;
		worker->own = NULL;
		return work;
	}
	/*
	 * If we are registered for work stealing, we need to dequeue
	 * safely against siblings.
	 */
	if (worker->flags & URCU_WORKER_STEAL) {
		/*
		 * Don't bother grabbing the worker queue lock if it is
		 * empty.
		 */
		if (cds_wfcq_empty(&worker->head, &worker->tail))
			return NULL;
		node = cds_wfcq_dequeue_blocking(&worker->head,
				&worker->tail);
	} else {
		node = ___cds_wfcq_dequeue_with_state(&worker->head,
				&worker->tail, NULL, 1, 0);
	}
	if (!node)
		return NULL;
	return caa_container_of(node, struct urcu_work, node);
}
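
/*
 * A worker thread typically alternates between urcu_accept_work() and
 * draining its local queue with urcu_dequeue_work(). A minimal sketch
 * follows ("queue", "struct my_work" and handle_work() are
 * hypothetical; the URCU_WORKER_STEAL flag additionally requires RCU
 * reader registration):
 *
 *	static void *worker_thread(void *arg)
 *	{
 *		struct urcu_worker worker;
 *		struct urcu_work *work;
 *
 *		rcu_register_thread();
 *		urcu_worker_init(&worker, URCU_WORKER_STEAL);
 *		urcu_worker_register(&queue, &worker);
 *		while (urcu_accept_work(&queue, &worker)
 *				!= URCU_ACCEPT_SHUTDOWN) {
 *			while ((work = urcu_dequeue_work(&worker)) != NULL)
 *				handle_work(caa_container_of(work,
 *						struct my_work, w));
 *		}
 *		urcu_worker_unregister(&queue, &worker);
 *		rcu_unregister_thread();
 *		return NULL;
 *	}
 */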

static inline
void urcu_workqueue_shutdown(struct urcu_workqueue *queue)
{
	/* Set shutdown */
	CMM_STORE_SHARED(queue->shutdown, true);
	/* Wakeup all workers */
	__urcu_workqueue_wakeup_all(queue);
}
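
/*
 * Shutdown sequence (sketch): once the dispatcher has enqueued its
 * last work item, it flags shutdown and wakes all waiters. Workers
 * drain the remaining work before urcu_accept_work() returns
 * URCU_ACCEPT_SHUTDOWN, after which the dispatcher can join them:
 *
 *	urcu_workqueue_shutdown(&queue);
 *	for (i = 0; i < nr_workers; i++)
 *		pthread_join(worker_tid[i], NULL);
 */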

#endif /* _URCU_WORKQUEUE_FIFO_H */