Fix call_rcu fork handling
[urcu.git] / urcu-call-rcu-impl.h
/*
 * urcu-call-rcu-impl.h
 *
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "config.h"
#include "urcu/wfcqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu-die.h"

/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
67 /*
68 * List of all call_rcu_data structures to keep valgrind happy.
69 * Protected by call_rcu_mutex.
70 */
71
72 static CDS_LIST_HEAD(call_rcu_data_list);
73
74 /* Link a thread using call_rcu() to its call_rcu thread. */
75
76 static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
77
78 /*
79 * Guard call_rcu thread creation and atfork handlers.
80 */
81 static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
82
83 /* If a given thread does not have its own call_rcu thread, this is default. */
84
85 static struct call_rcu_data *default_call_rcu_data;
86
/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as
 * an RCU reader: it reads per_cpu_call_rcu_data and the per-CPU
 * pointers without holding a mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

static void maxcpus_reset(void)
{
	maxcpus = 0;
}

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		return;
	}
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}

#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below,
 * used both where a CPU number is available and where it is not,
 * assume it is not constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void maxcpus_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

static int sched_getcpu(void)
{
	return -1;
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}

#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;

	if (crdp->cpu_affinity < 0)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	return sched_setaffinity(0, &mask);
#else
	return sched_setaffinity(0, sizeof(mask), &mask);
#endif
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before reading futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) == -1)
		futex_async(&crdp->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		futex_async(&crdp->futex, FUTEX_WAKE, 1,
			NULL, NULL, 0);
	}
}

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
	int ret;

	ret = set_thread_cpu_affinity(crdp);
	if (ret)
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				poll(NULL, 0, 1);
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				poll(NULL, 0, 10);
			}
		} else {
			poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	cmm_smp_mb();	/* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
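
/*
 * Illustrative sketch (editor's addition, compiled out): one way to
 * honor the grace-period requirement documented above when retiring a
 * per-CPU call_rcu_data structure. The helper name retire_cpu_crdp is
 * hypothetical, not part of the liburcu API.
 */
#if 0
static void retire_cpu_crdp(int cpu)
{
	struct call_rcu_data *old_crdp;

	rcu_read_lock();
	old_crdp = get_cpu_call_rcu_data(cpu);
	rcu_read_unlock();
	if (old_crdp == NULL)
		return;
	if (set_cpu_call_rcu_data(cpu, NULL))
		return;		/* errno says why */
	/* Wait for concurrent call_rcu() readers of the old pointer. */
	synchronize_rcu();
	call_rcu_data_free(old_crdp);
}
#endif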

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
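
/*
 * Illustrative sketch (editor's addition, compiled out): giving the
 * current thread a dedicated real-time call_rcu worker, then reverting
 * to the default. The function name use_private_rt_worker is
 * hypothetical.
 */
#if 0
static void use_private_rt_worker(void)
{
	struct call_rcu_data *crdp;

	crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
	set_thread_call_rcu_data(crdp);
	/* ... enqueue callbacks with call_rcu() from this thread ... */
	set_thread_call_rcu_data(NULL);
	/* Grace period before freeing, per call_rcu_data_free() rules. */
	synchronize_rcu();
	call_rcu_data_free(crdp);
}
#endif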

/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by another thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
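
/*
 * Illustrative sketch (editor's addition, compiled out): the lifecycle
 * pairing suggested by the comment above. The function name
 * with_per_cpu_workers is hypothetical.
 */
#if 0
static void with_per_cpu_workers(void)
{
	if (create_all_cpu_call_rcu_data(0))
		perror("create_all_cpu_call_rcu_data");
	/* ... application enqueues callbacks with call_rcu() ... */
	free_all_cpu_call_rcu_data();
}
#endif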

/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */

void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	cds_wfcq_node_init(&head->next);
	head->func = func;
	/* Holding rcu read-side lock across use of per-cpu crdp */
	rcu_read_lock();
	crdp = get_call_rcu_data();
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
	rcu_read_unlock();
}
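
/*
 * Illustrative sketch (editor's addition, compiled out): the usual
 * call_rcu() pattern -- embed a struct rcu_head in the protected
 * structure and free it from the callback once a grace period has
 * elapsed. struct my_node and its helpers are hypothetical.
 */
#if 0
struct my_node {
	int key;
	struct rcu_head rcu_head;	/* reclamation hook */
};

static void free_my_node(struct rcu_head *head)
{
	struct my_node *node = caa_container_of(head,
		struct my_node, rcu_head);

	free(node);
}

static void remove_my_node(struct my_node *node)
{
	/* ... unlink node from the RCU-protected structure ... */
	call_rcu(&node->rcu_head, free_my_node);
}
#endif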

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			poll(NULL, 0, 1);
	}
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		/* Create default call rcu data if need be */
		(void) get_default_call_rcu_data();
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	call_rcu_lock(&call_rcu_mutex);
	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (maxcpus <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * maxcpus);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < maxcpus; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			poll(NULL, 0, 1);
	}
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	maxcpus_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
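
/*
 * Illustrative sketch (editor's addition, compiled out): wiring the
 * three fork handlers above into pthread_atfork(), which is how an
 * application would typically arrange for them to run around fork().
 * The function name install_call_rcu_atfork is hypothetical.
 */
#if 0
static void install_call_rcu_atfork(void)
{
	int ret;

	ret = pthread_atfork(call_rcu_before_fork,
			call_rcu_after_fork_parent,
			call_rcu_after_fork_child);
	if (ret)
		urcu_die(ret);
}
#endif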