workqueue: fix uninitialized next pointer
[userspace-rcu.git] / urcu / workqueue-fifo.h
#ifndef _URCU_WORKQUEUE_FIFO_H
#define _URCU_WORKQUEUE_FIFO_H

/*
 * urcu/workqueue-fifo.h
 *
 * Userspace RCU library - work queue scheme with FIFO semantic
 *
 * Copyright (c) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/uatomic.h>
#include <urcu/lfstack.h>
#include <urcu/waitqueue-lifo.h>
#include <urcu/wfcqueue.h>
#include <urcu/rculist.h>
#include <pthread.h>
#include <assert.h>

enum urcu_accept_ret {
	URCU_ACCEPT_WORK	= 0,
	URCU_ACCEPT_SHUTDOWN	= 1,
};

/*
 * We use RCU to steal work from siblings. Therefore, one of the RCU
 * flavors needs to be included before this header. All workers that
 * participate in stealing (initialized with the URCU_WORKER_STEAL flag)
 * need to be registered RCU reader threads.
 */
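
/*
 * For instance, a translation unit using this header could start as
 * follows (a minimal sketch; any other RCU flavor header, such as
 * urcu-qsbr.h or urcu-bp.h, could be substituted):
 *
 *	#include <urcu.h>			// RCU flavor, included first
 *	#include <urcu/workqueue-fifo.h>
 *
 * Worker threads that steal work must also call rcu_register_thread()
 * before participating, and rcu_unregister_thread() when done.
 */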

struct urcu_work {
	struct cds_wfcq_node node;
};
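
/*
 * struct urcu_work is meant to be embedded into a caller-defined work
 * item; after dequeue, the caller recovers its own structure with
 * caa_container_of(). A minimal sketch (my_work_item is a hypothetical
 * name):
 *
 *	struct my_work_item {
 *		int payload;
 *		struct urcu_work work;
 *	};
 *
 *	struct my_work_item *item =
 *		caa_container_of(work, struct my_work_item, work);
 */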

struct urcu_workqueue {
	/* FIFO work queue */
	struct __cds_wfcq_head head;
	struct cds_wfcq_tail tail;

	/* Associated wait queue for LIFO wait/wakeup */
	struct urcu_wait_queue waitqueue;

	/* RCU linked list head of siblings for work stealing. */
	struct cds_list_head sibling_head;
	pthread_mutex_t sibling_lock;	/* Protect sibling list updates */

	bool shutdown;			/* Shutdown performed */
};

struct urcu_worker {
	/* Workqueue which can either be used by the worker, or stolen. */
	struct cds_wfcq_head head;
	struct cds_wfcq_tail tail;

	/* Work belonging to this worker. Cannot be stolen. */
	struct urcu_work *own;

	struct urcu_wait_node wait_node;
	/* RCU linked list node of siblings for work stealing. */
	struct cds_list_head sibling_node;
	int flags;	/* enum urcu_worker_flags */
};

enum urcu_worker_flags {
	URCU_WORKER_STEAL	= (1 << 0),
};

static inline
void urcu_workqueue_init(struct urcu_workqueue *queue)
{
	__cds_wfcq_init(&queue->head, &queue->tail);
	urcu_wait_queue_init(&queue->waitqueue);
	CDS_INIT_LIST_HEAD(&queue->sibling_head);
	pthread_mutex_init(&queue->sibling_lock, NULL);
	queue->shutdown = false;
}

static inline
void urcu_queue_work(struct urcu_workqueue *queue, struct urcu_work *work)
{
	bool was_empty;

	cds_wfcq_node_init(&work->node);

	/* Enqueue work. */
	was_empty = !cds_wfcq_enqueue(&queue->head, &queue->tail,
			&work->node);
	/*
	 * If the workqueue was previously empty, wake up one worker
	 * thread. It will eventually grab the entire content of the
	 * work-queue (therefore grabbing a "work batch"). While that
	 * thread is running and taking care of that work batch, when
	 * we enqueue more work, we will wake another thread (if there
	 * is one waiting), which will eventually grab the new batch,
	 * and so on. This scheme ensures that contiguous batches of
	 * work are handled by the same thread (for locality), and also
	 * ensures that we scale work to many worker threads when
	 * threads are busy enough to still be running when work is
	 * enqueued.
	 */
	if (was_empty) {
		rcu_read_lock();	/* Protect stack dequeue */
		(void) urcu_dequeue_wake_single(&queue->waitqueue);
		rcu_read_unlock();	/* Protect stack dequeue */
	}
}
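
/*
 * Producer side, a minimal sketch. It assumes an initialized struct
 * urcu_workqueue named "workqueue" and the hypothetical my_work_item
 * type shown above (error handling omitted):
 *
 *	struct my_work_item *item = malloc(sizeof(*item));
 *
 *	item->payload = 42;
 *	urcu_queue_work(&workqueue, &item->work);
 *
 * urcu_queue_work() initializes the embedded wfcq node itself, so the
 * caller only needs to fill in its own fields before enqueueing.
 */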

static inline
void __urcu_workqueue_wakeup_all(struct urcu_workqueue *queue)
{
	struct urcu_waiters waiters;

	rcu_read_lock();	/* Protect stack dequeue */
	urcu_move_waiters(&waiters, &queue->waitqueue);
	rcu_read_unlock();	/* Protect stack dequeue */

	(void) urcu_wake_all_waiters(&waiters);
}

static inline
void urcu_worker_init(struct urcu_worker *worker, int flags)
{
	cds_wfcq_init(&worker->head, &worker->tail);
	worker->flags = flags;
	urcu_wait_node_init(&worker->wait_node, URCU_WAIT_RUNNING);
	worker->own = NULL;
	worker->wait_node.node.next = NULL;
}

static inline
void urcu_worker_register(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	if (worker->flags & URCU_WORKER_STEAL) {
		pthread_mutex_lock(&queue->sibling_lock);
		cds_list_add_rcu(&worker->sibling_node, &queue->sibling_head);
		pthread_mutex_unlock(&queue->sibling_lock);
	}
}

static inline
void urcu_worker_unregister(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	enum cds_wfcq_ret wfcq_ret;

	if (worker->flags & URCU_WORKER_STEAL) {
		pthread_mutex_lock(&queue->sibling_lock);
		cds_list_del_rcu(&worker->sibling_node);
		pthread_mutex_unlock(&queue->sibling_lock);
	}

	/*
	 * Make sure we are removed from the waitqueue.
	 */
	if (CMM_LOAD_SHARED(worker->wait_node.node.next))
		__urcu_workqueue_wakeup_all(queue);

	/*
	 * Put any local work we still have back into the workqueue.
	 */
	wfcq_ret = __cds_wfcq_splice_blocking(&queue->head,
			&queue->tail,
			&worker->head,
			&worker->tail);
	if (wfcq_ret != CDS_WFCQ_RET_SRC_EMPTY
			&& wfcq_ret == CDS_WFCQ_RET_DEST_EMPTY) {
		/*
		 * Wake up a worker thread if we have put work back
		 * into a workqueue that was previously empty.
		 */
		rcu_read_lock();	/* Protect stack dequeue */
		(void) urcu_dequeue_wake_single(&queue->waitqueue);
		rcu_read_unlock();	/* Protect stack dequeue */
	}

	/*
	 * Wait for a grace period before freeing or reusing
	 * "worker", because it is used by the RCU linked list.
	 * This also prevents ABA for the waitqueue stack dequeue:
	 * it matches the RCU read-side critical sections around the
	 * dequeue and move-all operations on the waitqueue.
	 */
	synchronize_rcu();
}

static inline
bool ___urcu_grab_work(struct urcu_worker *worker,
		cds_wfcq_head_ptr_t src_head,
		struct cds_wfcq_tail *src_tail,
		bool steal)
{
	enum cds_wfcq_ret splice_ret;
	struct __cds_wfcq_head tmp_head;
	struct cds_wfcq_tail tmp_tail;
	struct cds_wfcq_node *node;

	/*
	 * Don't bother grabbing the src queue lock if it is empty.
	 */
	if (cds_wfcq_empty(src_head, src_tail))
		return false;
	__cds_wfcq_init(&tmp_head, &tmp_tail);

	/* Ensure that we preserve FIFO work order. */
	assert(!steal || worker->own == NULL);

	/* Splice to temporary queue. */
	if (steal)
		cds_wfcq_dequeue_lock(src_head.h, src_tail);
	splice_ret = __cds_wfcq_splice_blocking(&tmp_head,
			&tmp_tail,
			src_head,
			src_tail);
	if (steal)
		cds_wfcq_dequeue_unlock(src_head.h, src_tail);
	if (splice_ret == CDS_WFCQ_RET_SRC_EMPTY)
		return false;

	/*
	 * Keep one work entry for ourselves. This ensures forward
	 * progress amongst stealing co-workers. It also ensures that
	 * when a worker grabs some work from the global workqueue, it
	 * will have at least one work item to deal with.
	 */
	if (worker->own == NULL) {
		if (!steal) {
			/*
			 * Try to grab own work from worker workqueue to
			 * preserve FIFO order.
			 */
			node = cds_wfcq_dequeue_blocking(&worker->head,
					&worker->tail);
			if (node)
				goto got_node;
		}
		node = __cds_wfcq_dequeue_blocking(&tmp_head, &tmp_tail);
		assert(node != NULL);
got_node:
		worker->own = caa_container_of(node, struct urcu_work, node);
	}

	/* Splice into worker workqueue. */
	splice_ret = __cds_wfcq_splice_blocking(&worker->head,
			&worker->tail,
			&tmp_head,
			&tmp_tail);
	/* Ensure that we preserve FIFO work order. */
	assert(!steal || splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
	return true;
}

/*
 * Try stealing work from siblings when we have nothing to do.
 */
static inline
bool ___urcu_steal_work(struct urcu_worker *worker,
		struct urcu_worker *sibling)
{
	return ___urcu_grab_work(worker, &sibling->head, &sibling->tail, 1);
}

static inline
bool __urcu_steal_work(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	struct urcu_worker *sibling_prev, *sibling_next;
	struct cds_list_head *sibling_node;
	bool steal_performed = false;

	if (!(worker->flags & URCU_WORKER_STEAL))
		return false;

	rcu_read_lock();

	sibling_node = rcu_dereference(worker->sibling_node.next);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->next);
	sibling_next = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_next != worker)
		steal_performed = ___urcu_steal_work(worker, sibling_next);
	if (steal_performed)
		goto end;

	sibling_node = rcu_dereference(worker->sibling_node.prev);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->prev);
	sibling_prev = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_prev != worker && sibling_prev != sibling_next)
		steal_performed = ___urcu_steal_work(worker, sibling_prev);
end:
	rcu_read_unlock();

	return steal_performed;
}

static inline
bool ___urcu_wakeup_sibling(struct urcu_worker *sibling)
{
	return urcu_adaptative_wake_up(&sibling->wait_node);
}

static inline
bool __urcu_wakeup_siblings(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	struct urcu_worker *sibling_prev, *sibling_next;
	struct cds_list_head *sibling_node;
	bool wakeup_performed = false;

	if (!(worker->flags & URCU_WORKER_STEAL))
		return false;

	/* Only wake up siblings if we have work in our own queue. */
	if (cds_wfcq_empty(&worker->head, &worker->tail))
		return false;

	rcu_read_lock();

	sibling_node = rcu_dereference(worker->sibling_node.next);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->next);
	sibling_next = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_next != worker)
		wakeup_performed = ___urcu_wakeup_sibling(sibling_next);
	if (wakeup_performed)
		goto end;

	sibling_node = rcu_dereference(worker->sibling_node.prev);
	if (sibling_node == &queue->sibling_head)
		sibling_node = rcu_dereference(sibling_node->prev);
	sibling_prev = caa_container_of(sibling_node, struct urcu_worker,
			sibling_node);
	if (sibling_prev != worker && sibling_prev != sibling_next)
		wakeup_performed = ___urcu_wakeup_sibling(sibling_prev);
end:
	rcu_read_unlock();

	return wakeup_performed;
}

static inline
enum urcu_accept_ret urcu_accept_work(struct urcu_workqueue *queue,
		struct urcu_worker *worker)
{
	bool has_work;

	has_work = ___urcu_grab_work(worker, &queue->head, &queue->tail, 0);
	/* Don't wait if we have work to do. */
	if (has_work || !cds_wfcq_empty(&worker->head, &worker->tail))
		goto do_work;
	/* Try to steal work from siblings instead of blocking. */
	if (__urcu_steal_work(queue, worker))
		goto do_work;
	/* No more work to do, check shutdown state. */
	if (CMM_LOAD_SHARED(queue->shutdown))
		return URCU_ACCEPT_SHUTDOWN;
	urcu_wait_set_state(&worker->wait_node,
			URCU_WAIT_WAITING);
	if (!CMM_LOAD_SHARED(worker->wait_node.node.next)) {
		int was_empty;

		/*
		 * NULL next pointer. We are therefore not in
		 * the queue.
		 */
		cds_lfs_node_init(&worker->wait_node.node);
		/* Protect stack dequeue against ABA. */
		synchronize_rcu();
		was_empty = !urcu_wait_add(&queue->waitqueue,
				&worker->wait_node);
		/*
		 * If the wait queue was empty, it means we are the
		 * first thread to be put back into an otherwise empty
		 * wait queue. Re-check whether the work queue is empty
		 * after adding ourselves to the wait queue, so we can
		 * wake up the top of the wait queue if new work has
		 * appeared: the work enqueuer may not have seen that
		 * it needed to do a wake up.
		 */
		if (was_empty && !cds_wfcq_empty(&queue->head,
				&queue->tail)) {
			rcu_read_lock();	/* Protect stack dequeue */
			(void) urcu_dequeue_wake_single(&queue->waitqueue);
			rcu_read_unlock();	/* Protect stack dequeue */
		}
	} else {
		/*
		 * Non-NULL next pointer. We are therefore in
		 * the queue, or the dispatcher just removed us
		 * from it (after we read the next pointer), and
		 * is therefore awakening us. The state will
		 * therefore have been changed from WAITING to
		 * some other state, which will let the busy
		 * wait pass through.
		 */
	}
	urcu_adaptative_busy_wait(&worker->wait_node);
	/*
	 * We have been awakened: return to the caller, which is
	 * expected to invoke urcu_accept_work() again to re-check for
	 * work or shutdown.
	 */
	return URCU_ACCEPT_WORK;

do_work:
	/*
	 * We will be busy handling the work batch, awaken siblings so
	 * they can steal from us.
	 */
	(void) __urcu_wakeup_siblings(queue, worker);
	return URCU_ACCEPT_WORK;
}

static inline
struct urcu_work *urcu_dequeue_work(struct urcu_worker *worker)
{
	struct cds_wfcq_node *node;

	if (worker->own) {
		struct urcu_work *work;

		/* Process our own work entry. */
		work = worker->own;
		worker->own = NULL;
		return work;
	}
	/*
	 * If we are registered for work stealing, we need to dequeue
	 * safely against siblings.
	 */
	if (worker->flags & URCU_WORKER_STEAL) {
		/*
		 * Don't bother grabbing the worker queue lock if it is
		 * empty.
		 */
		if (cds_wfcq_empty(&worker->head, &worker->tail))
			return NULL;
		node = cds_wfcq_dequeue_blocking(&worker->head,
				&worker->tail);
	} else {
		node = ___cds_wfcq_dequeue_with_state(&worker->head,
				&worker->tail, NULL, 1, 0);
	}
	if (!node)
		return NULL;
	return caa_container_of(node, struct urcu_work, node);
}
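
/*
 * A worker thread typically loops on urcu_accept_work() and
 * urcu_dequeue_work(). A minimal sketch, assuming an initialized
 * struct urcu_workqueue named "workqueue" and the hypothetical
 * my_work_item type and handle_item() routine:
 *
 *	struct urcu_worker worker;
 *
 *	rcu_register_thread();		// required for URCU_WORKER_STEAL
 *	urcu_worker_init(&worker, URCU_WORKER_STEAL);
 *	urcu_worker_register(&workqueue, &worker);
 *
 *	for (;;) {
 *		struct urcu_work *work;
 *
 *		if (urcu_accept_work(&workqueue, &worker)
 *				== URCU_ACCEPT_SHUTDOWN)
 *			break;
 *		while ((work = urcu_dequeue_work(&worker)) != NULL) {
 *			struct my_work_item *item = caa_container_of(work,
 *					struct my_work_item, work);
 *
 *			handle_item(item);
 *		}
 *	}
 *	urcu_worker_unregister(&workqueue, &worker);
 *	rcu_unregister_thread();
 */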

static inline
void urcu_workqueue_shutdown(struct urcu_workqueue *queue)
{
	/* Set shutdown */
	CMM_STORE_SHARED(queue->shutdown, true);
	/* Wakeup all workers */
	__urcu_workqueue_wakeup_all(queue);
}
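
/*
 * On the dispatcher side, shutdown is a shared store followed by a
 * wakeup of all waiting workers; worker threads observe it through the
 * URCU_ACCEPT_SHUTDOWN return value of urcu_accept_work(). A minimal
 * teardown sketch (worker_tid[] and nr_workers are hypothetical):
 *
 *	urcu_workqueue_shutdown(&workqueue);
 *	for (i = 0; i < nr_workers; i++)
 *		pthread_join(worker_tid[i], NULL);
 */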

#endif /* _URCU_WORKQUEUE_FIFO_H */