urcu.git: src/urcu-call-rcu-impl.h
/*
 * urcu-call-rcu.c
 *
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "compat-getcpu.h"
#include <urcu/wfcqueue.h>
#include <urcu/call-rcu.h>
#include <urcu/pointer.h>
#include <urcu/list.h>
#include <urcu/futex.h>
#include <urcu/tls-compat.h>
#include <urcu/ref.h>
#include "urcu-die.h"
#include "urcu-utils.h"
#include "compat-smp.h"

#define SET_AFFINITY_CHECK_PERIOD	(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK	(SET_AFFINITY_CHECK_PERIOD - 1)

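/*
 * Illustrative sketch (not from the original source): because
 * SET_AFFINITY_CHECK_PERIOD is a power of two, masking a free-running
 * counter with SET_AFFINITY_CHECK_PERIOD_MASK cheaply selects one call
 * out of every 256, avoiding a modulo on the fast path. Names below
 * are hypothetical.
 */
#if 0	/* example only */
static unsigned long example_count;

static void example_periodic(void)
{
	/* Non-zero masked value: skip; zero: act (every 256th call). */
	if (++example_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return;
	/* ... periodic work here ... */
}
#endif
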
/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long gp_count;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct call_rcu_completion_work {
	struct rcu_head head;
	struct call_rcu_completion *completion;
};

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

static CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);

/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;

static struct urcu_atfork *registered_rculfhash_atfork;
static unsigned long registered_rculfhash_atfork_refcount;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID))

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as
 * an RCU read-side and reads per_cpu_call_rcu_data and the per-cpu
 * pointer without holding a mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long cpus_array_len;

static void cpus_array_len_reset(void)
{
	cpus_array_len = 0;
}

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (cpus_array_len != 0)
		return;
	cpus_array_len = get_possible_cpus_array_len();
	if (cpus_array_len <= 0) {
		return;
	}
	p = malloc(cpus_array_len * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', cpus_array_len * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}

#else /* #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below,
 * used both for cases where the cpu number is available and not
 * available, assume it is not constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long cpus_array_len = -1;

static void cpus_array_len_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}

/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#ifdef HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;
	int ret;

	if (crdp->cpu_affinity < 0)
		return 0;
	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == crdp->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);

	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp __attribute__((unused)))
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	while (uatomic_read(&crdp->futex) == -1) {
		if (!futex_async(&crdp->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
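
/*
 * Illustrative sketch (not from the original source) of the futex
 * handshake used by the wait/wake pair above: the waiter advertises
 * intent to sleep by moving the futex to -1, and the waker only
 * issues a system call when it actually observes a sleeper, keeping
 * the common enqueue path syscall-free. Names are hypothetical.
 */
#if 0	/* example only */
static int32_t example_futex;

static void example_waiter(void)
{
	uatomic_dec(&example_futex);	/* 0 -> -1: announce sleep intent */
	cmm_smp_mb();			/* order vs. re-checking for work */
	/* re-check for pending work here, then: */
	while (uatomic_read(&example_futex) == -1)
		(void) futex_async(&example_futex, FUTEX_WAIT, -1,
				NULL, NULL, 0);
}

static void example_waker(void)
{
	cmm_smp_mb();			/* publish work before reading futex */
	if (uatomic_read(&example_futex) == -1) {
		uatomic_set(&example_futex, 0);
		(void) futex_async(&example_futex, FUTEX_WAKE, 1,
				NULL, NULL, 0);
	}
}
#endif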

static void call_rcu_completion_wait(struct call_rcu_completion *completion)
{
	/* Read completion barrier count before read futex */
	cmm_smp_mb();
	while (uatomic_read(&completion->futex) == -1) {
		if (!futex_async(&completion->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
{
	/* Write to completion barrier count before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
		uatomic_set(&completion->futex, 0);
		if (futex_async(&completion->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp))
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(crdp))
			urcu_die(errno);

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourselves from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}
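
/*
 * Illustrative sketch (not from the original source): because the
 * call_rcu thread registers itself as an RCU reader above, a callback
 * may legitimately enter a read-side critical section, e.g. to walk
 * other RCU-protected data while freeing its own element. Names are
 * hypothetical.
 */
#if 0	/* example only */
struct example_node {
	int value;
	struct rcu_head rcu_head;
};

static void example_cb(struct rcu_head *head)
{
	struct example_node *node =
		caa_container_of(head, struct example_node, rcu_head);

	rcu_read_lock();	/* allowed: dispatcher thread is registered */
	/* ... inspect other RCU-protected structures ... */
	rcu_read_unlock();
	free(node);
}
#endif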

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	crdp->gp_count = 0;
	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && cpus_array_len > 0 && (cpu < 0 || cpus_array_len <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || cpus_array_len <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}
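
/*
 * Illustrative sketch (not from the original source): callers can
 * request a real-time callback thread pinned to a CPU by combining
 * the URCU_CALL_RCU_RT flag with a non-negative cpu_affinity, or pass
 * flags == 0 and cpu_affinity == -1 for an ordinary unpinned thread.
 */
#if 0	/* example only */
static struct call_rcu_data *example_make_rt_crdp(void)
{
	/* RT wakeup behavior, pinned to CPU 0 (assuming CPU 0 exists). */
	return create_call_rcu_data(URCU_CALL_RCU_RT, 0);
}
#endif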

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || cpus_array_len <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
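
/*
 * Illustrative sketch (not from the original source) of the removal
 * protocol described above: detach the per-CPU structure, wait a full
 * grace period so concurrent call_rcu() invocations holding the old
 * pointer drain out, then free it. Names are hypothetical.
 */
#if 0	/* example only */
static void example_remove_cpu_crdp(int cpu)
{
	struct call_rcu_data *old;

	rcu_read_lock();
	old = get_cpu_call_rcu_data(cpu);
	rcu_read_unlock();
	if (set_cpu_call_rcu_data(cpu, NULL))
		return;		/* nothing installed or out of range */
	synchronize_rcu();	/* let in-flight users of "old" finish */
	call_rcu_data_free(old);
}
#endif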

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (cpus_array_len > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
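
/*
 * Illustrative sketch (not from the original source): give the current
 * thread its own dedicated call_rcu thread, then tear it down once no
 * more callbacks will be queued through it. Names are hypothetical.
 */
#if 0	/* example only */
static void example_use_private_crdp(void)
{
	struct call_rcu_data *crdp;

	crdp = create_call_rcu_data(0, -1);
	set_thread_call_rcu_data(crdp);
	/* ... this thread's call_rcu() now uses crdp ... */
	set_thread_call_rcu_data(NULL);
	call_rcu_data_free(crdp);	/* leftover CBs move to default */
}
#endif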

/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (cpus_array_len <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < cpus_array_len; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by another thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
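
/*
 * Illustrative sketch (not from the original source): typical
 * application lifecycle pairing for the per-CPU worker threads.
 */
#if 0	/* example only */
static void example_percpu_lifecycle(void)
{
	if (create_all_cpu_call_rcu_data(0))
		perror("create_all_cpu_call_rcu_data");
	/* ... application runs, call_rcu() spreads across CPUs ... */
	free_all_cpu_call_rcu_data();
	rcu_barrier();	/* wait for CBs handed off to the default thread */
}
#endif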

/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

static void _call_rcu(struct rcu_head *head,
		      void (*func)(struct rcu_head *head),
		      struct call_rcu_data *crdp)
{
	cds_wfcq_node_init(&head->next);
	head->func = func;
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}

/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	/* Holding rcu read-side lock across use of per-cpu crdp */
	_rcu_read_lock();
	crdp = get_call_rcu_data();
	_call_rcu(head, func, crdp);
	_rcu_read_unlock();
}
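
/*
 * Illustrative sketch (not from the original source): the canonical
 * call_rcu() pattern. Embed a struct rcu_head in the element, unlink
 * the element from its RCU-protected structure, then hand the head to
 * call_rcu(); the callback recovers the element with caa_container_of
 * and frees it after a grace period. Names are hypothetical.
 */
#if 0	/* example only */
struct example_entry {
	int key;
	struct rcu_head rcu_head;
};

static void example_free_entry(struct rcu_head *head)
{
	struct example_entry *e =
		caa_container_of(head, struct example_entry, rcu_head);

	free(e);
}

static void example_retire(struct example_entry *e)
{
	/* caller has already unlinked e from readers' view */
	call_rcu(&e->rcu_head, example_free_entry);
}
#endif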

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			(void) poll(NULL, 0, 1);
	}
	call_rcu_lock(&call_rcu_mutex);
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		call_rcu_unlock(&call_rcu_mutex);
		/* Create default call rcu data if need be. */
		/* CBs queued here will be handed to the default list. */
		(void) get_default_call_rcu_data();
		call_rcu_lock(&call_rcu_mutex);
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (cpus_array_len <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * cpus_array_len);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < cpus_array_len; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < cpus_array_len; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}

static
void free_completion(struct urcu_ref *ref)
{
	struct call_rcu_completion *completion;

	completion = caa_container_of(ref, struct call_rcu_completion, ref);
	free(completion);
}

static
void _rcu_barrier_complete(struct rcu_head *head)
{
	struct call_rcu_completion_work *work;
	struct call_rcu_completion *completion;

	work = caa_container_of(head, struct call_rcu_completion_work, head);
	completion = work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		call_rcu_completion_wake_up(completion);
	urcu_ref_put(&completion->ref, free_completion);
	free(work);
}

/*
 * Wait for all in-flight call_rcu callbacks to complete execution.
 */
void rcu_barrier(void)
{
	struct call_rcu_data *crdp;
	struct call_rcu_completion *completion;
	int count = 0;
	int was_online;

	/* Put in offline state in QSBR. */
	was_online = _rcu_read_ongoing();
	if (was_online)
		rcu_thread_offline();
	/*
	 * Calling rcu_barrier() within an RCU read-side critical
	 * section is an error.
	 */
	if (_rcu_read_ongoing()) {
		static int warned = 0;

		if (!warned) {
			fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
		}
		warned = 1;
		goto online;
	}

	completion = calloc(1, sizeof(*completion));
	if (!completion)
		urcu_die(errno);

	call_rcu_lock(&call_rcu_mutex);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		count++;

	/* Referenced by rcu_barrier() and each call_rcu thread. */
	urcu_ref_set(&completion->ref, count + 1);
	completion->barrier_count = count;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		struct call_rcu_completion_work *work;

		work = calloc(1, sizeof(*work));
		if (!work)
			urcu_die(errno);
		work->completion = completion;
		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
	}
	call_rcu_unlock(&call_rcu_mutex);

	/* Wait for them */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		call_rcu_completion_wait(completion);
	}

	urcu_ref_put(&completion->ref, free_completion);

online:
	if (was_online)
		rcu_thread_online();
}
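
/*
 * Illustrative sketch (not from the original source): one common use
 * of rcu_barrier() is making teardown safe. Before releasing
 * resources that queued callbacks may still reference (for instance
 * code or data of a module about to be unloaded), drain them first.
 */
#if 0	/* example only */
static void example_module_teardown(void)
{
	/* stop queuing new callbacks from this module, then: */
	rcu_barrier();	/* all previously queued callbacks have run */
	/* now safe to release what the callbacks referenced */
}
#endif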

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	call_rcu_lock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->before_fork(atfork->priv);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			(void) poll(NULL, 0, 1);
	}
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
			(void) poll(NULL, 0, 1);
	}
	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_parent(atfork->priv);
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;
	struct urcu_atfork *atfork;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_child(atfork->priv);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	cpus_array_len_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
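
/*
 * Illustrative sketch (not from the original source): applications
 * mixing fork() (without exec()) and call_rcu() typically wire the
 * three handlers above into pthread_atfork() once at startup, as the
 * comments above suggest.
 */
#if 0	/* example only */
static void example_install_fork_handlers(void)
{
	(void) pthread_atfork(call_rcu_before_fork,
			call_rcu_after_fork_parent,
			call_rcu_after_fork_child);
}
#endif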

void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
{
	call_rcu_lock(&call_rcu_mutex);
	if (registered_rculfhash_atfork_refcount++)
		goto end;
	registered_rculfhash_atfork = atfork;
end:
	call_rcu_unlock(&call_rcu_mutex);
}

void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
{
	call_rcu_lock(&call_rcu_mutex);
	if (--registered_rculfhash_atfork_refcount)
		goto end;
	registered_rculfhash_atfork = NULL;
end:
	call_rcu_unlock(&call_rcu_mutex);
}