/*
 * workqueue.c
 *
 * Userspace RCU library - Userspace workqueues
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * Copyright (c) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "compat-getcpu.h"
#include "urcu/wfcqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu/ref.h"
#include "urcu-die.h"

#include "workqueue.h"

#define SET_AFFINITY_CHECK_PERIOD	(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK	(SET_AFFINITY_CHECK_PERIOD - 1)

/* Data structure that identifies a workqueue. */

struct urcu_workqueue {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long loop_count;
	void *priv;
	void (*grace_period_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*initialize_worker_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*finalize_worker_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_before_pause_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_after_resume_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_before_wait_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_after_wake_up_fct)(struct urcu_workqueue *workqueue, void *priv);
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct urcu_workqueue_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct urcu_workqueue_completion_work {
	struct urcu_work work;
	struct urcu_workqueue_completion *completion;
};
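
/*
 * A completion tracks a batch of queued completion work items:
 * barrier_count is incremented for each item queued, decremented as
 * each item runs, and waiters block on the futex until it reaches
 * zero. The reference count keeps the structure alive until the last
 * worker and the waiter are both done with it.
 */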

/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#if HAVE_SCHED_SETAFFINITY
static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
{
	cpu_set_t mask;
	int ret;

	if (workqueue->cpu_affinity < 0)
		return 0;
	if (++workqueue->loop_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == workqueue->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(workqueue->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	ret = sched_setaffinity(0, &mask);
#else
	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
{
	return 0;
}
#endif

static void futex_wait(int32_t *futex)
{
	/* Read condition before read futex */
	cmm_smp_mb();
	if (uatomic_read(futex) != -1)
		return;
	while (futex_async(futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void futex_wake_up(int32_t *futex)
{
	/* Write to condition before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(futex) == -1)) {
		uatomic_set(futex, 0);
		if (futex_async(futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
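
/*
 * Futex protocol used above: a value of 0 means the waiter is running;
 * a prospective waiter decrements the value to -1 before re-checking
 * its wait condition, then sleeps while the value is still -1. The
 * waker only issues FUTEX_WAKE after resetting the value to 0, so a
 * wake-up racing with the decrement makes the FUTEX_WAIT call return
 * immediately with EWOULDBLOCK.
 */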

/* This is the code run by each worker thread. */
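/*
 * Each iteration of the worker loop re-checks CPU affinity, honors
 * pause requests (used around fork()), then splices the entire pending
 * queue into a local list, runs the optional grace-period hook once
 * per batch, and invokes every dequeued work item. Non-real-time
 * workers sleep on the futex when the queue is empty.
 */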

static void *workqueue_thread(void *arg)
{
	unsigned long cbcount;
	struct urcu_workqueue *workqueue = (struct urcu_workqueue *) arg;
	int rt = !!(uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_RT);

	if (set_thread_cpu_affinity(workqueue))
		urcu_die(errno);

	if (workqueue->initialize_worker_fct)
		workqueue->initialize_worker_fct(workqueue, workqueue->priv);

	if (!rt) {
		uatomic_dec(&workqueue->futex);
		/* Decrement futex before reading workqueue */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(workqueue))
			urcu_die(errno);

		if (uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourselves from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			if (workqueue->worker_before_pause_fct)
				workqueue->worker_before_pause_fct(workqueue, workqueue->priv);
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&workqueue->flags, URCU_WORKQUEUE_PAUSED);
			while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&workqueue->flags, ~URCU_WORKQUEUE_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			if (workqueue->worker_after_resume_fct)
				workqueue->worker_after_resume_fct(workqueue, workqueue->priv);
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &workqueue->cbs_head, &workqueue->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			if (workqueue->grace_period_fct)
				workqueue->grace_period_fct(workqueue, workqueue->priv);
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct urcu_work *uwp;

				uwp = caa_container_of(cbs,
					struct urcu_work, next);
				uwp->func(uwp);
				cbcount++;
			}
			uatomic_sub(&workqueue->qlen, cbcount);
		}
		if (uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_STOP)
			break;
		if (workqueue->worker_before_wait_fct)
			workqueue->worker_before_wait_fct(workqueue, workqueue->priv);
		if (!rt) {
			if (cds_wfcq_empty(&workqueue->cbs_head,
					&workqueue->cbs_tail)) {
				futex_wait(&workqueue->futex);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&workqueue->futex);
				/*
				 * Decrement futex before reading
				 * the work queue list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		if (workqueue->worker_after_wake_up_fct)
			workqueue->worker_after_wake_up_fct(workqueue, workqueue->priv);
	}
	if (!rt) {
		/*
		 * Read work queue list before writing futex.
		 */
		cmm_smp_mb();
		uatomic_set(&workqueue->futex, 0);
	}
	if (workqueue->finalize_worker_fct)
		workqueue->finalize_worker_fct(workqueue, workqueue->priv);
	return NULL;
}

struct urcu_workqueue *urcu_workqueue_create(unsigned long flags,
		int cpu_affinity, void *priv,
		void (*grace_period_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*initialize_worker_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*finalize_worker_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_before_wait_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_after_wake_up_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_before_pause_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_after_resume_fct)(struct urcu_workqueue *workqueue, void *priv))
{
	struct urcu_workqueue *workqueue;
	int ret;

	workqueue = malloc(sizeof(*workqueue));
	if (workqueue == NULL)
		urcu_die(errno);
	memset(workqueue, '\0', sizeof(*workqueue));
	cds_wfcq_init(&workqueue->cbs_head, &workqueue->cbs_tail);
	workqueue->qlen = 0;
	workqueue->futex = 0;
	workqueue->flags = flags;
	workqueue->priv = priv;
	workqueue->grace_period_fct = grace_period_fct;
	workqueue->initialize_worker_fct = initialize_worker_fct;
	workqueue->finalize_worker_fct = finalize_worker_fct;
	workqueue->worker_before_wait_fct = worker_before_wait_fct;
	workqueue->worker_after_wake_up_fct = worker_after_wake_up_fct;
	workqueue->worker_before_pause_fct = worker_before_pause_fct;
	workqueue->worker_after_resume_fct = worker_after_resume_fct;
	workqueue->cpu_affinity = cpu_affinity;
	workqueue->loop_count = 0;
	cmm_smp_mb();	/* Structure initialized before pointer is planted. */
	ret = pthread_create(&workqueue->tid, NULL, workqueue_thread, workqueue);
	if (ret) {
		urcu_die(ret);
	}
	return workqueue;
}
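
/*
 * Example (illustrative sketch, not part of the library): create a
 * workqueue with no worker hooks and no CPU affinity constraint.
 *
 *	struct urcu_workqueue *wq;
 *
 *	wq = urcu_workqueue_create(0, -1, NULL,
 *		NULL, NULL, NULL, NULL, NULL, NULL, NULL);
 *
 * Passing 0 as flags selects the non-real-time (futex-based) worker,
 * and a negative cpu_affinity disables the affinity retry logic.
 */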

static void wake_worker_thread(struct urcu_workqueue *workqueue)
{
	if (!(_CMM_LOAD_SHARED(workqueue->flags) & URCU_WORKQUEUE_RT))
		futex_wake_up(&workqueue->futex);
}

static int urcu_workqueue_destroy_worker(struct urcu_workqueue *workqueue)
{
	int ret;
	void *retval;

	uatomic_or(&workqueue->flags, URCU_WORKQUEUE_STOP);
	wake_worker_thread(workqueue);

	ret = pthread_join(workqueue->tid, &retval);
	if (ret) {
		urcu_die(ret);
	}
	if (retval != NULL) {
		urcu_die(EINVAL);
	}
	workqueue->flags &= ~URCU_WORKQUEUE_STOP;
	workqueue->tid = 0;
	return 0;
}

void urcu_workqueue_destroy(struct urcu_workqueue *workqueue)
{
	if (workqueue == NULL) {
		return;
	}
	if (urcu_workqueue_destroy_worker(workqueue)) {
		urcu_die(errno);
	}
	assert(cds_wfcq_empty(&workqueue->cbs_head, &workqueue->cbs_tail));
	free(workqueue);
}

void urcu_workqueue_queue_work(struct urcu_workqueue *workqueue,
		struct urcu_work *work,
		void (*func)(struct urcu_work *work))
{
	cds_wfcq_node_init(&work->next);
	work->func = func;
	cds_wfcq_enqueue(&workqueue->cbs_head, &workqueue->cbs_tail, &work->next);
	uatomic_inc(&workqueue->qlen);
	wake_worker_thread(workqueue);
}
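
/*
 * Example (illustrative sketch, not part of the library): callers
 * embed a struct urcu_work in their own structure, enqueue it, and
 * recover the enclosing object with caa_container_of() from the work
 * function. The my_job type and do_something() below are hypothetical.
 *
 *	struct my_job {
 *		struct urcu_work work;
 *		int payload;
 *	};
 *
 *	static void my_job_func(struct urcu_work *work)
 *	{
 *		struct my_job *job = caa_container_of(work,
 *			struct my_job, work);
 *
 *		do_something(job->payload);
 *		free(job);
 *	}
 *
 *	urcu_workqueue_queue_work(wq, &job->work, my_job_func);
 */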

static
void free_completion(struct urcu_ref *ref)
{
	struct urcu_workqueue_completion *completion;

	completion = caa_container_of(ref, struct urcu_workqueue_completion, ref);
	free(completion);
}

static
void _urcu_workqueue_wait_complete(struct urcu_work *work)
{
	struct urcu_workqueue_completion_work *completion_work;
	struct urcu_workqueue_completion *completion;

	completion_work = caa_container_of(work, struct urcu_workqueue_completion_work, work);
	completion = completion_work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		futex_wake_up(&completion->futex);
	urcu_ref_put(&completion->ref, free_completion);
	free(completion_work);
}

struct urcu_workqueue_completion *urcu_workqueue_create_completion(void)
{
	struct urcu_workqueue_completion *completion;

	completion = calloc(1, sizeof(*completion));
	if (!completion)
		urcu_die(errno);
	urcu_ref_set(&completion->ref, 1);
	completion->barrier_count = 0;
	return completion;
}

void urcu_workqueue_destroy_completion(struct urcu_workqueue_completion *completion)
{
	urcu_ref_put(&completion->ref, free_completion);
}

void urcu_workqueue_wait_completion(struct urcu_workqueue_completion *completion)
{
	/* Wait for all queued completion work items to run. */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		futex_wait(&completion->futex);
	}
}

void urcu_workqueue_queue_completion(struct urcu_workqueue *workqueue,
		struct urcu_workqueue_completion *completion)
{
	struct urcu_workqueue_completion_work *work;

	work = calloc(1, sizeof(*work));
	if (!work)
		urcu_die(errno);
	work->completion = completion;
	urcu_ref_get(&completion->ref);
	uatomic_inc(&completion->barrier_count);
	urcu_workqueue_queue_work(workqueue, &work->work, _urcu_workqueue_wait_complete);
}

/*
 * Wait for all in-flight work to complete execution.
 */
void urcu_workqueue_flush_queued_work(struct urcu_workqueue *workqueue)
{
	struct urcu_workqueue_completion *completion;

	completion = urcu_workqueue_create_completion();
	if (!completion)
		urcu_die(ENOMEM);
	urcu_workqueue_queue_completion(workqueue, completion);
	urcu_workqueue_wait_completion(completion);
	urcu_workqueue_destroy_completion(completion);
}

/* To be used in before fork handler. */
void urcu_workqueue_pause_worker(struct urcu_workqueue *workqueue)
{
	uatomic_or(&workqueue->flags, URCU_WORKQUEUE_PAUSE);
	cmm_smp_mb__after_uatomic_or();
	wake_worker_thread(workqueue);

	while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSED) == 0)
		(void) poll(NULL, 0, 1);
}

/* To be used in after fork parent handler. */
void urcu_workqueue_resume_worker(struct urcu_workqueue *workqueue)
{
	uatomic_and(&workqueue->flags, ~URCU_WORKQUEUE_PAUSE);
	while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSED) != 0)
		(void) poll(NULL, 0, 1);
}

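/* To be used in after fork child handler. */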
void urcu_workqueue_create_worker(struct urcu_workqueue *workqueue)
{
	int ret;

	/* Clear workqueue state from parent. */
	workqueue->flags &= ~URCU_WORKQUEUE_PAUSED;
	workqueue->flags &= ~URCU_WORKQUEUE_PAUSE;
	workqueue->tid = 0;
	ret = pthread_create(&workqueue->tid, NULL, workqueue_thread, workqueue);
	if (ret) {
		urcu_die(ret);
	}
}
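
/*
 * Example (illustrative sketch, not part of the library): wiring the
 * pause/resume/create_worker functions into fork handlers for a
 * hypothetical global workqueue "my_wq".
 *
 *	static struct urcu_workqueue *my_wq;
 *
 *	static void before_fork(void)
 *	{
 *		urcu_workqueue_pause_worker(my_wq);
 *	}
 *
 *	static void after_fork_parent(void)
 *	{
 *		urcu_workqueue_resume_worker(my_wq);
 *	}
 *
 *	static void after_fork_child(void)
 *	{
 *		urcu_workqueue_create_worker(my_wq);
 *	}
 *
 *	pthread_atfork(before_fork, after_fork_parent, after_fork_child);
 */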