userspace-rcu.git / urcu-call-rcu-impl.h (version 0.7.11)
/*
 * urcu-call-rcu.c
 *
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "config.h"
#include "urcu/wfqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu-die.h"

/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	struct cds_wfq_queue cbs;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);

/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#ifdef HAVE_SCHED_GETCPU

static int urcu_sched_getcpu(void)
{
	return sched_getcpu();
}

#else /* #ifdef HAVE_SCHED_GETCPU */

static int urcu_sched_getcpu(void)
{
	return -1;
}

#endif /* #else #ifdef HAVE_SCHED_GETCPU */

#if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU)

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as an
 * RCU reader and reads per_cpu_call_rcu_data and the per-CPU pointer
 * without holding a mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

static void maxcpus_reset(void)
{
	maxcpus = 0;
}

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		return;
	}
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}

#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below,
 * used both for cases where the cpu number is available and where it
 * is not, assume it is not constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void maxcpus_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}

#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;

	if (crdp->cpu_affinity < 0)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	return sched_setaffinity(0, &mask);
#else
	return sched_setaffinity(0, sizeof(mask), &mask);
#endif
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before reading futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) == -1)
		futex_async(&crdp->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		futex_async(&crdp->futex, FUTEX_WAKE, 1,
			NULL, NULL, 0);
	}
}
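
/*
 * Note on the sleep/wake-up protocol implemented by the two helpers
 * above: a non-RT call_rcu thread announces that it is about to block
 * by decrementing crdp->futex to -1, re-checks its callback queue, and
 * only then does FUTEX_WAIT on the value -1. call_rcu_wake_up() runs
 * on the enqueue side: it resets the futex to 0 and issues FUTEX_WAKE
 * only if it observes -1, so the wake-up system call is skipped when
 * no call_rcu thread is sleeping. The cmm_smp_mb() calls order the
 * callback list accesses against the futex accesses on both sides.
 */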

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
	struct rcu_head *rhp;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
	int ret;

	ret = set_thread_cpu_affinity(crdp);
	if (ret)
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				poll(NULL, 0, 1);
			rcu_register_thread();
		}

		/*
		 * If the queue is non-empty, splice all pending callbacks
		 * onto a local list, wait for a grace period, then invoke
		 * them.
		 */
		if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
			while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
				poll(NULL, 0, 1);
			_CMM_STORE_SHARED(crdp->cbs.head, NULL);
			cbs_tail = (struct cds_wfq_node **)
				uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
			synchronize_rcu();
			cbcount = 0;
			do {
				while (cbs->next == NULL &&
				       &cbs->next != cbs_tail)
					poll(NULL, 0, 1);
				if (cbs == &crdp->cbs.dummy) {
					cbs = cbs->next;
					continue;
				}
				rhp = (struct rcu_head *)cbs;
				cbs = cbs->next;
				rhp->func(rhp);
				cbcount++;
			} while (cbs != NULL);
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (&crdp->cbs.head
			    == _CMM_LOAD_SHARED(crdp->cbs.tail)) {
				call_rcu_wait(crdp);
				poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				poll(NULL, 0, 10);
			}
		} else {
			poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before writing futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfq_init(&crdp->cbs);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	cmm_smp_mb(); /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}
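
/*
 * Illustrative usage sketch (not part of the library): look up the
 * per-CPU call_rcu_data under the RCU read-side lock and use the result
 * only within that read-side critical section, e.g.:
 *
 *	rcu_read_lock();
 *	crdp = get_cpu_call_rcu_data(urcu_sched_getcpu());
 *	if (crdp != NULL)
 *		... use crdp here, still under the read-side lock ...
 *	rcu_read_unlock();
 *
 * This is the pattern call_rcu() below relies on when it picks a queue.
 */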

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						     int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
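
/*
 * Illustrative sketch of the removal sequence documented above (not
 * part of the library): detach a CPU's call_rcu_data, wait for a grace
 * period so that concurrent lookups are done with it, then free it:
 *
 *	struct call_rcu_data *old_crdp = get_cpu_call_rcu_data(cpu);
 *
 *	if (old_crdp != NULL) {
 *		set_cpu_call_rcu_data(cpu, NULL);
 *		synchronize_rcu();
 *		call_rcu_data_free(old_crdp);
 *	}
 *
 * free_all_cpu_call_rcu_data() below applies this pattern to every CPU.
 */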

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
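
/*
 * Illustrative sketch (not part of the library): give the current
 * thread its own dedicated call_rcu worker, here a real-time one with
 * no CPU affinity (cpu_affinity == -1 means "no affinity"):
 *
 *	struct call_rcu_data *my_crdp;
 *
 *	my_crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(my_crdp);
 *
 * To switch back, call set_thread_call_rcu_data(NULL) and dispose of
 * my_crdp with call_rcu_data_free() once it is no longer in use.
 */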

/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* It has been created by another thread. */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
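
/*
 * Illustrative sketch (not part of the library): an application that
 * wants one call_rcu worker per CPU typically calls, early at startup:
 *
 *	if (create_all_cpu_call_rcu_data(0))
 *		perror("create_all_cpu_call_rcu_data");
 *
 * and pairs this with free_all_cpu_call_rcu_data() at shutdown.
 */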

/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you need the
 * first invocation of call_rcu() to be fast, make sure to create a
 * call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */

void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	cds_wfq_node_init(&head->next);
	head->func = func;
	/* Holding rcu read-side lock across use of per-cpu crdp */
	rcu_read_lock();
	crdp = get_call_rcu_data();
	cds_wfq_enqueue(&crdp->cbs, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
	rcu_read_unlock();
}
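
/*
 * Illustrative sketch of typical call_rcu() usage (not part of the
 * library): embed a struct rcu_head in the object to be reclaimed and
 * free the object from the callback, which runs after a grace period.
 * The struct foo, free_foo() and foo_release() names are made up for
 * this example.
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int payload;
 *	};
 *
 *	static void free_foo(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu);
 *
 *		free(p);
 *	}
 *
 *	static void foo_release(struct foo *p)
 *	{
 *		// Caller must be a registered RCU read-side thread.
 *		call_rcu(&p->rcu, free_foo);
 *	}
 */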

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct cds_wfq_node **cbs_endprev;

	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			poll(NULL, 0, 1);
	}
	/* Migrate any leftover callbacks to the default call_rcu thread. */
	if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
		while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
			poll(NULL, 0, 1);
		_CMM_STORE_SHARED(crdp->cbs.head, NULL);
		cbs_tail = (struct cds_wfq_node **)
			uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
		/* Create default call rcu data if need be */
		(void) get_default_call_rcu_data();
		cbs_endprev = (struct cds_wfq_node **)
			uatomic_xchg(&default_call_rcu_data->cbs.tail,
				cbs_tail);
		_CMM_STORE_SHARED(*cbs_endprev, cbs);
		uatomic_add(&default_call_rcu_data->qlen,
			uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	call_rcu_lock(&call_rcu_mutex);
	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (maxcpus <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * maxcpus);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < maxcpus; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			poll(NULL, 0, 1);
	}
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Clean up call_rcu_data pointers before use */
	maxcpus_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
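
/*
 * Illustrative sketch (not part of the library): an application that
 * fork()s without exec() can register the three handlers above so the
 * parent and child both end up with a consistent call_rcu state:
 *
 *	pthread_atfork(call_rcu_before_fork,
 *		       call_rcu_after_fork_parent,
 *		       call_rcu_after_fork_child);
 */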