workqueue: fix uninitialized mutex
userspace-rcu.git / urcu/workqueue-fifo.h
#ifndef _URCU_WORKQUEUE_FIFO_H
#define _URCU_WORKQUEUE_FIFO_H

/*
 * urcu/workqueue-fifo.h
 *
 * Userspace RCU library - work queue scheme with FIFO semantic
 *
 * Copyright (c) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/uatomic.h>
#include <urcu/lfstack.h>
#include <urcu/waitqueue-lifo.h>
#include <urcu/wfcqueue.h>
#include <urcu/rculist.h>
#include <pthread.h>
#include <assert.h>

enum urcu_accept_ret {
	URCU_ACCEPT_WORK = 0,
	URCU_ACCEPT_SHUTDOWN = 1,
};

/*
 * We use RCU to steal work from siblings. Therefore, one of the RCU
 * flavors needs to be included before this header. All workers that
 * participate in stealing (initialized with the URCU_WORKER_STEAL flag)
 * need to be registered RCU reader threads.
 */
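
/*
 * Illustrative worker-thread sketch (not part of this header's API;
 * global_queue, handle_work() and struct my_work are hypothetical user
 * code, with struct my_work embedding a struct urcu_work member named
 * "w"). It shows the intended call sequence under the requirements
 * above: register as an RCU reader, register the worker with the
 * workqueue, then loop accepting and dequeuing work until shutdown:
 *
 *	struct urcu_worker worker;
 *
 *	rcu_register_thread();
 *	urcu_worker_init(&worker, URCU_WORKER_STEAL);
 *	urcu_worker_register(&global_queue, &worker);
 *	for (;;) {
 *		struct urcu_work *work;
 *
 *		if (urcu_accept_work(&global_queue, &worker)
 *				== URCU_ACCEPT_SHUTDOWN)
 *			break;
 *		while ((work = urcu_dequeue_work(&worker)) != NULL)
 *			handle_work(caa_container_of(work,
 *					struct my_work, w));
 *	}
 *	urcu_worker_unregister(&global_queue, &worker);
 *	rcu_unregister_thread();
 */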

struct urcu_work {
	struct cds_wfcq_node node;
};

struct urcu_workqueue {
	/* FIFO work queue */
	struct __cds_wfcq_head head;
	struct cds_wfcq_tail tail;

	/* Associated wait queue for LIFO wait/wakeup */
	struct urcu_wait_queue waitqueue;

	/* RCU linked list head of siblings for work stealing. */
	struct cds_list_head sibling_head;
	pthread_mutex_t sibling_lock; /* Protect sibling list updates */

	bool shutdown; /* Shutdown performed */
};

struct urcu_worker {
	/* Workqueue which can either be used by the worker, or stolen. */
	struct cds_wfcq_head head;
	struct cds_wfcq_tail tail;

	/* Work belonging to the worker. Cannot be stolen. */
	struct urcu_work *own;

	struct urcu_wait_node wait_node;
	/* RCU linked list node of siblings for work stealing. */
	struct cds_list_head sibling_node;
	int flags; /* enum urcu_worker_flags */
};

enum urcu_worker_flags {
	URCU_WORKER_STEAL = (1 << 0),
};

static inline
void urcu_workqueue_init(struct urcu_workqueue *queue)
{
	__cds_wfcq_init(&queue->head, &queue->tail);
	urcu_wait_queue_init(&queue->waitqueue);
	CDS_INIT_LIST_HEAD(&queue->sibling_head);
	pthread_mutex_init(&queue->sibling_lock, NULL);
	queue->shutdown = false;
}

static inline
void urcu_queue_work(struct urcu_workqueue *queue, struct urcu_work *work)
{
	bool was_empty;

	cds_wfcq_node_init(&work->node);

	/* Enqueue work. */
	was_empty = !cds_wfcq_enqueue(&queue->head, &queue->tail,
			&work->node);
	/*
	 * If the workqueue was previously empty, wake up one worker
	 * thread. It will eventually grab the entire content of the
	 * workqueue (therefore grabbing a "work batch"). While that
	 * thread is running and taking care of its work batch, any
	 * further work we enqueue wakes up another thread (if there is
	 * one waiting), which will eventually grab the new batch, and
	 * so on. This scheme ensures that contiguous batches of work
	 * are handled by the same thread (for locality), and also
	 * ensures that work scales to many worker threads when threads
	 * are busy enough to still be running when work is enqueued.
	 */
	if (was_empty) {
		rcu_read_lock(); /* Protect stack dequeue */
		(void) urcu_dequeue_wake_single(&queue->waitqueue);
		rcu_read_unlock(); /* Protect stack dequeue */
	}
}
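
/*
 * Illustrative producer-side sketch (hypothetical user code; struct
 * my_work and dispatch() are not part of this header, and error
 * handling is omitted). A dispatcher embeds a struct urcu_work in its
 * own work item and hands it to urcu_queue_work(), which wakes a
 * waiting worker only when the queue was previously empty, as
 * described above. Note that with most RCU flavors the enqueuing
 * thread must also be a registered RCU reader, since urcu_queue_work()
 * takes rcu_read_lock() for the wakeup:
 *
 *	struct my_work {
 *		int payload;
 *		struct urcu_work w;
 *	};
 *
 *	static void dispatch(struct urcu_workqueue *queue, int payload)
 *	{
 *		struct my_work *work = malloc(sizeof(*work));
 *
 *		work->payload = payload;
 *		urcu_queue_work(queue, &work->w);
 *	}
 */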

static inline
void __urcu_workqueue_wakeup_all(struct urcu_workqueue *queue)
{
	struct urcu_waiters waiters;

	rcu_read_lock(); /* Protect stack dequeue */
	urcu_move_waiters(&waiters, &queue->waitqueue);
	rcu_read_unlock(); /* Protect stack dequeue */

	(void) urcu_wake_all_waiters(&waiters);
}

static inline
void urcu_worker_init(struct urcu_worker *worker, int flags)
{
	cds_wfcq_init(&worker->head, &worker->tail);
	worker->flags = flags;
	urcu_wait_node_init(&worker->wait_node, URCU_WAIT_RUNNING);
	worker->own = NULL;
}

static inline
void urcu_worker_register(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	if (worker->flags & URCU_WORKER_STEAL) {
		pthread_mutex_lock(&queue->sibling_lock);
		cds_list_add_rcu(&worker->sibling_node, &queue->sibling_head);
		pthread_mutex_unlock(&queue->sibling_lock);
	}
}

static inline
void urcu_worker_unregister(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	enum cds_wfcq_ret wfcq_ret;

	if (worker->flags & URCU_WORKER_STEAL) {
		pthread_mutex_lock(&queue->sibling_lock);
		cds_list_del_rcu(&worker->sibling_node);
		pthread_mutex_unlock(&queue->sibling_lock);
	}

	/*
	 * Make sure we are removed from waitqueue.
	 */
	if (CMM_LOAD_SHARED(worker->wait_node.node.next))
		__urcu_workqueue_wakeup_all(queue);

	/*
	 * Put any local work we still have back into the workqueue.
	 */
	wfcq_ret = __cds_wfcq_splice_blocking(&queue->head,
			&queue->tail,
			&worker->head,
			&worker->tail);
	if (wfcq_ret != CDS_WFCQ_RET_SRC_EMPTY
			&& wfcq_ret == CDS_WFCQ_RET_DEST_EMPTY) {
		/*
		 * Wake up a worker thread if we have put work back
		 * into a workqueue that was previously empty.
		 */
		rcu_read_lock(); /* Protect stack dequeue */
		(void) urcu_dequeue_wake_single(&queue->waitqueue);
		rcu_read_unlock(); /* Protect stack dequeue */
	}

	/*
	 * Wait for a grace period before freeing or reusing "worker",
	 * because it is used by an RCU linked list. This also prevents
	 * ABA for the waitqueue stack dequeue: it matches the RCU
	 * read-side critical sections around the dequeue and move-all
	 * operations on the waitqueue.
	 */
	synchronize_rcu();
}

static inline
bool ___urcu_grab_work(struct urcu_worker *worker,
		cds_wfcq_head_ptr_t src_head,
		struct cds_wfcq_tail *src_tail,
		bool steal)
{
	enum cds_wfcq_ret splice_ret;
	struct __cds_wfcq_head tmp_head;
	struct cds_wfcq_tail tmp_tail;
	struct cds_wfcq_node *node;

	/*
	 * Don't bother grabbing the src queue lock if it is empty.
	 */
	if (cds_wfcq_empty(src_head, src_tail))
		return false;
	__cds_wfcq_init(&tmp_head, &tmp_tail);

	/* Ensure that we preserve FIFO work order. */
	assert(!steal || worker->own == NULL);

	/* Splice to temporary queue. */
	if (steal)
		cds_wfcq_dequeue_lock(src_head.h, src_tail);
	splice_ret = __cds_wfcq_splice_blocking(&tmp_head,
			&tmp_tail,
			src_head,
			src_tail);
	if (steal)
		cds_wfcq_dequeue_unlock(src_head.h, src_tail);
	if (splice_ret == CDS_WFCQ_RET_SRC_EMPTY)
		return false;

	/*
	 * Keep one work entry for ourselves. This ensures forward
	 * progress amongst stealing co-workers, and also ensures that
	 * when a worker grabs some work from the global workqueue, it
	 * will have at least one work item to deal with.
	 */
	if (worker->own == NULL) {
		if (!steal) {
			/*
			 * Try to grab own work from the worker
			 * workqueue to preserve FIFO order.
			 */
			node = cds_wfcq_dequeue_blocking(&worker->head,
					&worker->tail);
			if (node)
				goto got_node;
		}
		node = __cds_wfcq_dequeue_blocking(&tmp_head, &tmp_tail);
		assert(node != NULL);
got_node:
		worker->own = caa_container_of(node, struct urcu_work, node);
	}

	/* Splice into the worker workqueue. */
	splice_ret = __cds_wfcq_splice_blocking(&worker->head,
			&worker->tail,
			&tmp_head,
			&tmp_tail);
	/* Ensure that we preserve FIFO work order. */
	assert(!steal || splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
	return true;
}

/*
 * Try stealing work from siblings when we have nothing to do.
 */
static inline
bool ___urcu_steal_work(struct urcu_worker *worker,
		struct urcu_worker *sibling)
{
	return ___urcu_grab_work(worker, &sibling->head, &sibling->tail, true);
}

static inline
bool __urcu_steal_work(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	struct urcu_worker *sibling_prev, *sibling_next;
	struct cds_list_head *sibling_node;
	bool steal_performed = false;

	if (!(worker->flags & URCU_WORKER_STEAL))
		return false;

	rcu_read_lock();

	sibling_node = rcu_dereference(worker->sibling_node.next);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->next);
	sibling_next = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_next != worker)
		steal_performed = ___urcu_steal_work(worker, sibling_next);
	if (steal_performed)
		goto end;

	sibling_node = rcu_dereference(worker->sibling_node.prev);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->prev);
	sibling_prev = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_prev != worker && sibling_prev != sibling_next)
		steal_performed = ___urcu_steal_work(worker, sibling_prev);
end:
	rcu_read_unlock();

	return steal_performed;
}

static inline
bool ___urcu_wakeup_sibling(struct urcu_worker *sibling)
{
	return urcu_adaptative_wake_up(&sibling->wait_node);
}

static inline
bool __urcu_wakeup_siblings(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	struct urcu_worker *sibling_prev, *sibling_next;
	struct cds_list_head *sibling_node;
	bool wakeup_performed = false;

	if (!(worker->flags & URCU_WORKER_STEAL))
		return false;

	/* Only wake up siblings if we have work in our own queue. */
	if (cds_wfcq_empty(&worker->head, &worker->tail))
		return false;

	rcu_read_lock();

	sibling_node = rcu_dereference(worker->sibling_node.next);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->next);
	sibling_next = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_next != worker)
		wakeup_performed = ___urcu_wakeup_sibling(sibling_next);
	if (wakeup_performed)
		goto end;

	sibling_node = rcu_dereference(worker->sibling_node.prev);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->prev);
	sibling_prev = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_prev != worker && sibling_prev != sibling_next)
		wakeup_performed = ___urcu_wakeup_sibling(sibling_prev);
end:
	rcu_read_unlock();

	return wakeup_performed;
}

static inline
enum urcu_accept_ret urcu_accept_work(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	bool has_work;

	has_work = ___urcu_grab_work(worker, &queue->head, &queue->tail, false);
	/* Don't wait if we have work to do. */
	if (has_work || !cds_wfcq_empty(&worker->head, &worker->tail))
		goto do_work;
	/* Try to steal work from a sibling instead of blocking. */
	if (__urcu_steal_work(queue, worker))
		goto do_work;
	/* No more work to do, check shutdown state. */
	if (CMM_LOAD_SHARED(queue->shutdown))
		return URCU_ACCEPT_SHUTDOWN;
	urcu_wait_set_state(&worker->wait_node,
			URCU_WAIT_WAITING);
	if (!CMM_LOAD_SHARED(worker->wait_node.node.next)) {
		int was_empty;

		/*
		 * NULL next pointer. We are therefore not in
		 * the queue.
		 */
		cds_lfs_node_init(&worker->wait_node.node);
		/* Protect stack dequeue against ABA. */
		synchronize_rcu();
		was_empty = !urcu_wait_add(&queue->waitqueue,
				&worker->wait_node);
		/*
		 * If the wait queue was empty, it means we are the
		 * first thread to be put back into an otherwise empty
		 * wait queue. Re-check whether the work queue is empty
		 * after adding ourselves to the wait queue, so we can
		 * wake up the top of the wait queue now that new work
		 * has appeared: the work enqueuer may not have seen
		 * that it needed to do a wake up.
		 */
		if (was_empty && !cds_wfcq_empty(&queue->head,
				&queue->tail)) {
			rcu_read_lock(); /* Protect stack dequeue */
			(void) urcu_dequeue_wake_single(&queue->waitqueue);
			rcu_read_unlock(); /* Protect stack dequeue */
		}
	} else {
		/*
		 * Non-NULL next pointer. We are therefore in the
		 * queue, or the dispatcher just removed us from it
		 * (after we read the next pointer), and is therefore
		 * awakening us. The state will therefore have been
		 * changed from WAITING to some other state, which will
		 * let the busy wait pass through.
		 */
	}
	urcu_adaptative_busy_wait(&worker->wait_node);
	return URCU_ACCEPT_WORK;

do_work:
	/*
	 * We will be busy handling the work batch, so awaken siblings
	 * so they can steal from us.
	 */
	(void) __urcu_wakeup_siblings(queue, worker);
	return URCU_ACCEPT_WORK;
}

static inline
struct urcu_work *urcu_dequeue_work(struct urcu_worker *worker)
{
	struct cds_wfcq_node *node;

	if (worker->own) {
		struct urcu_work *work;

		/* Process our own work entry. */
		work = worker->own;
		worker->own = NULL;
		return work;
	}
	/*
	 * If we are registered for work stealing, we need to dequeue
	 * safely against siblings.
	 */
	if (worker->flags & URCU_WORKER_STEAL) {
		/*
		 * Don't bother grabbing the worker queue lock if it is
		 * empty.
		 */
		if (cds_wfcq_empty(&worker->head, &worker->tail))
			return NULL;
		node = cds_wfcq_dequeue_blocking(&worker->head,
				&worker->tail);
	} else {
		node = ___cds_wfcq_dequeue_with_state(&worker->head,
				&worker->tail, NULL, 1, 0);
	}
	if (!node)
		return NULL;
	return caa_container_of(node, struct urcu_work, node);
}

static inline
void urcu_workqueue_shutdown(struct urcu_workqueue *queue)
{
	/* Set shutdown. */
	CMM_STORE_SHARED(queue->shutdown, true);
	/* Wake up all workers. */
	__urcu_workqueue_wakeup_all(queue);
}

#endif /* _URCU_WORKQUEUE_FIFO_H */