[urcu.git] / src / urcu-call-rcu-impl.h
1 /*
2 * urcu-call-rcu.c
3 *
4 * Userspace RCU library - batch memory reclamation with kernel API
5 *
6 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _LGPL_SOURCE
24 #include <stdio.h>
25 #include <pthread.h>
26 #include <signal.h>
27 #include <assert.h>
28 #include <stdlib.h>
29 #include <stdint.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <poll.h>
33 #include <sys/time.h>
34 #include <unistd.h>
35 #include <sched.h>
36
37 #include "compat-getcpu.h"
38 #include <urcu/wfcqueue.h>
39 #include <urcu/call-rcu.h>
40 #include <urcu/pointer.h>
41 #include <urcu/list.h>
42 #include <urcu/futex.h>
43 #include <urcu/tls-compat.h>
44 #include <urcu/ref.h>
45 #include "urcu-die.h"
46 #include "urcu-utils.h"
47 #include "compat-smp.h"
48
49 #define SET_AFFINITY_CHECK_PERIOD (1U << 8) /* 256 */
50 #define SET_AFFINITY_CHECK_PERIOD_MASK (SET_AFFINITY_CHECK_PERIOD - 1)
51
52 /* Data structure that identifies a call_rcu thread. */
53
54 struct call_rcu_data {
55 /*
56 * We do not align head on a different cache-line than tail
57 * mainly because call_rcu callback-invocation threads use
58 * batching ("splice") to get an entire list of callbacks, which
59 * effectively empties the queue, and requires touching the tail
60 * anyway.
61 */
62 struct cds_wfcq_tail cbs_tail;
63 struct cds_wfcq_head cbs_head;
64 unsigned long flags;
65 int32_t futex;
66 unsigned long qlen; /* maintained for debugging. */
67 pthread_t tid;
68 int cpu_affinity;
69 unsigned long gp_count;
70 struct cds_list_head list;
71 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
72
73 struct call_rcu_completion {
74 int barrier_count;
75 int32_t futex;
76 struct urcu_ref ref;
77 };
78
79 struct call_rcu_completion_work {
80 struct rcu_head head;
81 struct call_rcu_completion *completion;
82 };
83
84 enum crdf_flags {
85 CRDF_FLAG_JOIN_THREAD = (1 << 0),
86 };
87
88 /*
89 * List of all call_rcu_data structures to keep valgrind happy.
90 * Protected by call_rcu_mutex.
91 */
92
93 static CDS_LIST_HEAD(call_rcu_data_list);
94
95 /* Link a thread using call_rcu() to its call_rcu thread. */
96
97 static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
98
99 /*
100 * Guard call_rcu thread creation and atfork handlers.
101 */
102 static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
103
104 /* If a given thread does not have its own call_rcu thread, this is the default. */
105
106 static struct call_rcu_data *default_call_rcu_data;
107
108 static struct urcu_atfork *registered_rculfhash_atfork;
109
110 /*
111 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
112 * available, then we can have call_rcu threads assigned to individual
113 * CPUs rather than only to specific threads.
114 */
115
116 #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID))
117
118 /*
119 * Pointer to array of pointers to per-CPU call_rcu_data structures
120 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
121 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as an
122 * RCU reader and reads per_cpu_call_rcu_data and the per-cpu pointer
123 * without holding the mutex. The call_rcu_mutex protects updates.
124 */
125
126 static struct call_rcu_data **per_cpu_call_rcu_data;
127 static long cpus_array_len;
128
129 static void cpus_array_len_reset(void)
130 {
131 cpus_array_len = 0;
132 }
133
134 /* Allocate the array if it has not already been allocated. */
135
136 static void alloc_cpu_call_rcu_data(void)
137 {
138 struct call_rcu_data **p;
139 static int warned = 0;
140
141 if (cpus_array_len != 0)
142 return;
143 cpus_array_len = get_possible_cpus_array_len();
144 if (cpus_array_len <= 0) {
145 return;
146 }
147 p = malloc(cpus_array_len * sizeof(*per_cpu_call_rcu_data));
148 if (p != NULL) {
149 memset(p, '\0', cpus_array_len * sizeof(*per_cpu_call_rcu_data));
150 rcu_set_pointer(&per_cpu_call_rcu_data, p);
151 } else {
152 if (!warned) {
153 fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
154 }
155 warned = 1;
156 }
157 }
158
159 #else /* #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */
160
161 /*
162 * per_cpu_call_rcu_data should be constant, but some functions below, used both
163 * for cases where the cpu number is available and not available, assume it is
164 * not constant.
165 */
166 static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
167 static const long cpus_array_len = -1;
168
169 static void cpus_array_len_reset(void)
170 {
171 }
172
173 static void alloc_cpu_call_rcu_data(void)
174 {
175 }
176
177 #endif /* #else #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */
178
179 /* Acquire the specified pthread mutex. */
180
181 static void call_rcu_lock(pthread_mutex_t *pmp)
182 {
183 int ret;
184
185 ret = pthread_mutex_lock(pmp);
186 if (ret)
187 urcu_die(ret);
188 }
189
190 /* Release the specified pthread mutex. */
191
192 static void call_rcu_unlock(pthread_mutex_t *pmp)
193 {
194 int ret;
195
196 ret = pthread_mutex_unlock(pmp);
197 if (ret)
198 urcu_die(ret);
199 }
200
201 /*
202 * Periodically retry setting CPU affinity if we migrate.
203 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
204 * cpuset(7).
205 */
206 #ifdef HAVE_SCHED_SETAFFINITY
207 static
208 int set_thread_cpu_affinity(struct call_rcu_data *crdp)
209 {
210 cpu_set_t mask;
211 int ret;
212
213 if (crdp->cpu_affinity < 0)
214 return 0;
215 if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
216 return 0;
217 if (urcu_sched_getcpu() == crdp->cpu_affinity)
218 return 0;
219
220 CPU_ZERO(&mask);
221 CPU_SET(crdp->cpu_affinity, &mask);
222 ret = sched_setaffinity(0, sizeof(mask), &mask);
223
224 /*
225 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
226 * cpuset(7). This is why we should always retry if we detect
227 * migration.
228 */
229 if (ret && errno == EINVAL) {
230 ret = 0;
231 errno = 0;
232 }
233 return ret;
234 }
235 #else
236 static
237 int set_thread_cpu_affinity(struct call_rcu_data *crdp __attribute__((unused)))
238 {
239 return 0;
240 }
241 #endif
242
243 static void call_rcu_wait(struct call_rcu_data *crdp)
244 {
245 /* Read call_rcu list before read futex */
246 cmm_smp_mb();
247 while (uatomic_read(&crdp->futex) == -1) {
248 if (!futex_async(&crdp->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
249 /*
250 * Prior wakeups queued by unrelated code
251 * using the same address can cause futex wait to
252 * return 0 even though the futex value is still
253 * -1 (spurious wakeups). Check the value again
254 * in user-space to validate whether it really
255 * differs from -1.
256 */
257 continue;
258 }
259 switch (errno) {
260 case EAGAIN:
261 /* Value already changed. */
262 return;
263 case EINTR:
264 /* Retry if interrupted by signal. */
265 break; /* Get out of switch. Check again. */
266 default:
267 /* Unexpected error. */
268 urcu_die(errno);
269 }
270 }
271 }
272
273 static void call_rcu_wake_up(struct call_rcu_data *crdp)
274 {
275 /* Write to call_rcu list before reading/writing futex */
276 cmm_smp_mb();
277 if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
278 uatomic_set(&crdp->futex, 0);
279 if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
280 NULL, NULL, 0) < 0)
281 urcu_die(errno);
282 }
283 }
284
285 static void call_rcu_completion_wait(struct call_rcu_completion *completion)
286 {
287 /* Read completion barrier count before read futex */
288 cmm_smp_mb();
289 while (uatomic_read(&completion->futex) == -1) {
290 if (!futex_async(&completion->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
291 /*
292 * Prior wakeups queued by unrelated code
293 * using the same address can cause futex wait to
294 * return 0 even though the futex value is still
295 * -1 (spurious wakeups). Check the value again
296 * in user-space to validate whether it really
297 * differs from -1.
298 */
299 continue;
300 }
301 switch (errno) {
302 case EAGAIN:
303 /* Value already changed. */
304 return;
305 case EINTR:
306 /* Retry if interrupted by signal. */
307 break; /* Get out of switch. Check again. */
308 default:
309 /* Unexpected error. */
310 urcu_die(errno);
311 }
312 }
313 }
314
315 static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
316 {
317 /* Write to completion barrier count before reading/writing futex */
318 cmm_smp_mb();
319 if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
320 uatomic_set(&completion->futex, 0);
321 if (futex_async(&completion->futex, FUTEX_WAKE, 1,
322 NULL, NULL, 0) < 0)
323 urcu_die(errno);
324 }
325 }
326
327 /* This is the code run by each call_rcu thread. */
328
329 static void *call_rcu_thread(void *arg)
330 {
331 unsigned long cbcount;
332 struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
333 int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
334
335 if (set_thread_cpu_affinity(crdp))
336 urcu_die(errno);
337
338 /*
339 * If callbacks take a read-side lock, we need to be registered.
340 */
341 rcu_register_thread();
342
343 URCU_TLS(thread_call_rcu_data) = crdp;
344 if (!rt) {
345 uatomic_dec(&crdp->futex);
346 /* Decrement futex before reading call_rcu list */
347 cmm_smp_mb();
348 }
349 for (;;) {
350 struct cds_wfcq_head cbs_tmp_head;
351 struct cds_wfcq_tail cbs_tmp_tail;
352 struct cds_wfcq_node *cbs, *cbs_tmp_n;
353 enum cds_wfcq_ret splice_ret;
354
355 if (set_thread_cpu_affinity(crdp))
356 urcu_die(errno);
357
358 if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
359 /*
360 * Pause requested. Become quiescent: remove
361 * ourself from all global lists, and don't
362 * process any callback. The callback lists may
363 * still be non-empty though.
364 */
365 rcu_unregister_thread();
366 cmm_smp_mb__before_uatomic_or();
367 uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
368 while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
369 (void) poll(NULL, 0, 1);
370 uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
371 cmm_smp_mb__after_uatomic_and();
372 rcu_register_thread();
373 }
374
375 cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
376 splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
377 &cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
378 assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
379 assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
380 if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
381 synchronize_rcu();
382 cbcount = 0;
383 __cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
384 &cbs_tmp_tail, cbs, cbs_tmp_n) {
385 struct rcu_head *rhp;
386
387 rhp = caa_container_of(cbs,
388 struct rcu_head, next);
389 rhp->func(rhp);
390 cbcount++;
391 }
392 uatomic_sub(&crdp->qlen, cbcount);
393 }
394 if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
395 break;
396 rcu_thread_offline();
397 if (!rt) {
398 if (cds_wfcq_empty(&crdp->cbs_head,
399 &crdp->cbs_tail)) {
400 call_rcu_wait(crdp);
401 (void) poll(NULL, 0, 10);
402 uatomic_dec(&crdp->futex);
403 /*
404 * Decrement futex before reading
405 * call_rcu list.
406 */
407 cmm_smp_mb();
408 } else {
409 (void) poll(NULL, 0, 10);
410 }
411 } else {
412 (void) poll(NULL, 0, 10);
413 }
414 rcu_thread_online();
415 }
416 if (!rt) {
417 /*
418 * Read call_rcu list before write futex.
419 */
420 cmm_smp_mb();
421 uatomic_set(&crdp->futex, 0);
422 }
423 uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
424 rcu_unregister_thread();
425 return NULL;
426 }
427
428 /*
429 * Create both a call_rcu thread and the corresponding call_rcu_data
430 * structure, linking the structure in as specified. Caller must hold
431 * call_rcu_mutex.
432 */
433
434 static void call_rcu_data_init(struct call_rcu_data **crdpp,
435 unsigned long flags,
436 int cpu_affinity)
437 {
438 struct call_rcu_data *crdp;
439 int ret;
440
441 crdp = malloc(sizeof(*crdp));
442 if (crdp == NULL)
443 urcu_die(errno);
444 memset(crdp, '\0', sizeof(*crdp));
445 cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
446 crdp->qlen = 0;
447 crdp->futex = 0;
448 crdp->flags = flags;
449 cds_list_add(&crdp->list, &call_rcu_data_list);
450 crdp->cpu_affinity = cpu_affinity;
451 crdp->gp_count = 0;
452 cmm_smp_mb(); /* Structure initialized before pointer is planted. */
453 *crdpp = crdp;
454 ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
455 if (ret)
456 urcu_die(ret);
457 }
458
459 /*
460 * Return a pointer to the call_rcu_data structure for the specified
461 * CPU, returning NULL if there is none. We cannot automatically
462 * create it because the platform we are running on might not define
463 * urcu_sched_getcpu().
464 *
465 * The call to this function and use of the returned call_rcu_data
466 * should be protected by an RCU read-side lock.
467 */
468
469 struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
470 {
471 static int warned = 0;
472 struct call_rcu_data **pcpu_crdp;
473
474 pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
475 if (pcpu_crdp == NULL)
476 return NULL;
477 if (!warned && cpus_array_len > 0 && (cpu < 0 || cpus_array_len <= cpu)) {
478 fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
479 warned = 1;
480 }
481 if (cpu < 0 || cpus_array_len <= cpu)
482 return NULL;
483 return rcu_dereference(pcpu_crdp[cpu]);
484 }
485
486 /*
487 * Return the tid corresponding to the call_rcu thread whose
488 * call_rcu_data structure is specified.
489 */
490
491 pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
492 {
493 return crdp->tid;
494 }
495
496 /*
497 * Create a call_rcu_data structure (with thread) and return a pointer.
498 */
499
500 static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
501 int cpu_affinity)
502 {
503 struct call_rcu_data *crdp;
504
505 call_rcu_data_init(&crdp, flags, cpu_affinity);
506 return crdp;
507 }
508
509 struct call_rcu_data *create_call_rcu_data(unsigned long flags,
510 int cpu_affinity)
511 {
512 struct call_rcu_data *crdp;
513
514 call_rcu_lock(&call_rcu_mutex);
515 crdp = __create_call_rcu_data(flags, cpu_affinity);
516 call_rcu_unlock(&call_rcu_mutex);
517 return crdp;
518 }
519
520 /*
521 * Set the specified CPU to use the specified call_rcu_data structure.
522 *
523 * Use NULL to remove a CPU's call_rcu_data structure, but it is
524 * the caller's responsibility to dispose of the removed structure.
525 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
526 * (prior to NULLing it out, of course).
527 *
528 * The caller must wait for a grace-period to pass between return from
529 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
530 * previous call rcu data as argument.
531 */
532
533 int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
534 {
535 static int warned = 0;
536
537 call_rcu_lock(&call_rcu_mutex);
538 alloc_cpu_call_rcu_data();
539 if (cpu < 0 || cpus_array_len <= cpu) {
540 if (!warned) {
541 fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
542 warned = 1;
543 }
544 call_rcu_unlock(&call_rcu_mutex);
545 errno = EINVAL;
546 return -EINVAL;
547 }
548
549 if (per_cpu_call_rcu_data == NULL) {
550 call_rcu_unlock(&call_rcu_mutex);
551 errno = ENOMEM;
552 return -ENOMEM;
553 }
554
555 if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
556 call_rcu_unlock(&call_rcu_mutex);
557 errno = EEXIST;
558 return -EEXIST;
559 }
560
561 rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
562 call_rcu_unlock(&call_rcu_mutex);
563 return 0;
564 }
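
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * detach and dispose of one CPU's call_rcu_data while honoring the
 * grace-period requirement described above. The variable names are
 * hypothetical; error handling is elided.
 *
 *	struct call_rcu_data *old_crdp;
 *
 *	old_crdp = get_cpu_call_rcu_data(cpu);
 *	if (old_crdp != NULL) {
 *		(void) set_cpu_call_rcu_data(cpu, NULL);
 *		synchronize_rcu();	// wait for readers of the old pointer
 *		call_rcu_data_free(old_crdp);
 *	}
 */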
565
566 /*
567 * Return a pointer to the default call_rcu_data structure, creating
568 * one if need be. Because we never free call_rcu_data structures,
569 * we don't need to be in an RCU read-side critical section.
570 */
571
572 struct call_rcu_data *get_default_call_rcu_data(void)
573 {
574 if (default_call_rcu_data != NULL)
575 return rcu_dereference(default_call_rcu_data);
576 call_rcu_lock(&call_rcu_mutex);
577 if (default_call_rcu_data != NULL) {
578 call_rcu_unlock(&call_rcu_mutex);
579 return default_call_rcu_data;
580 }
581 call_rcu_data_init(&default_call_rcu_data, 0, -1);
582 call_rcu_unlock(&call_rcu_mutex);
583 return default_call_rcu_data;
584 }
585
586 /*
587 * Return the call_rcu_data structure that applies to the currently
588 * running thread. Any call_rcu_data structure assigned specifically
589 * to this thread has first priority, followed by any call_rcu_data
590 * structure assigned to the CPU on which the thread is running,
591 * followed by the default call_rcu_data structure. If there is not
592 * yet a default call_rcu_data structure, one will be created.
593 *
594 * Calls to this function and use of the returned call_rcu_data should
595 * be protected by an RCU read-side lock.
596 */
597 struct call_rcu_data *get_call_rcu_data(void)
598 {
599 struct call_rcu_data *crd;
600
601 if (URCU_TLS(thread_call_rcu_data) != NULL)
602 return URCU_TLS(thread_call_rcu_data);
603
604 if (cpus_array_len > 0) {
605 crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
606 if (crd)
607 return crd;
608 }
609
610 return get_default_call_rcu_data();
611 }
612
613 /*
614 * Return a pointer to this task's call_rcu_data if there is one.
615 */
616
617 struct call_rcu_data *get_thread_call_rcu_data(void)
618 {
619 return URCU_TLS(thread_call_rcu_data);
620 }
621
622 /*
623 * Set this task's call_rcu_data structure as specified, regardless
624 * of whether or not this task already had one. (This allows switching
625 * to and from real-time call_rcu threads, for example.)
626 *
627 * Use NULL to remove a thread's call_rcu_data structure, but it is
628 * the caller's responsibility to dispose of the removed structure.
629 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
630 * (prior to NULLing it out, of course).
631 */
632
633 void set_thread_call_rcu_data(struct call_rcu_data *crdp)
634 {
635 URCU_TLS(thread_call_rcu_data) = crdp;
636 }
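
/*
 * Illustrative sketch (not part of the original file): give the calling
 * thread its own real-time call_rcu worker, assuming the URCU_CALL_RCU_RT
 * flag and a cpu_affinity of -1 (no affinity), as used elsewhere in this
 * file. "my_crdp" is a hypothetical name.
 *
 *	struct call_rcu_data *my_crdp;
 *
 *	my_crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(my_crdp);
 *	// ... this thread's call_rcu() invocations now use my_crdp ...
 */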
637
638 /*
639 * Create a separate call_rcu thread for each CPU. This does not
640 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
641 * function if you want that behavior. Should be paired with
642 * free_all_cpu_call_rcu_data() to teardown these call_rcu worker
643 * threads.
644 */
645
646 int create_all_cpu_call_rcu_data(unsigned long flags)
647 {
648 int i;
649 struct call_rcu_data *crdp;
650 int ret;
651
652 call_rcu_lock(&call_rcu_mutex);
653 alloc_cpu_call_rcu_data();
654 call_rcu_unlock(&call_rcu_mutex);
655 if (cpus_array_len <= 0) {
656 errno = EINVAL;
657 return -EINVAL;
658 }
659 if (per_cpu_call_rcu_data == NULL) {
660 errno = ENOMEM;
661 return -ENOMEM;
662 }
663 for (i = 0; i < cpus_array_len; i++) {
664 call_rcu_lock(&call_rcu_mutex);
665 if (get_cpu_call_rcu_data(i)) {
666 call_rcu_unlock(&call_rcu_mutex);
667 continue;
668 }
669 crdp = __create_call_rcu_data(flags, i);
670 if (crdp == NULL) {
671 call_rcu_unlock(&call_rcu_mutex);
672 errno = ENOMEM;
673 return -ENOMEM;
674 }
675 call_rcu_unlock(&call_rcu_mutex);
676 if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
677 call_rcu_data_free(crdp);
678
679 /* it has been created by another thread */
680 if (ret == -EEXIST)
681 continue;
682
683 return ret;
684 }
685 }
686 return 0;
687 }
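
/*
 * Illustrative sketch (not part of the original file): typical startup and
 * teardown pairing for the per-CPU worker threads. A zero flags argument
 * requests non-real-time workers.
 *
 *	if (create_all_cpu_call_rcu_data(0) != 0)
 *		fprintf(stderr, "per-CPU call_rcu threads unavailable: %s\n",
 *			strerror(errno));
 *	// ... application runs; call_rcu() then picks per-CPU workers ...
 *	free_all_cpu_call_rcu_data();
 */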
688
689 /*
690 * Wake up the call_rcu thread corresponding to the specified
691 * call_rcu_data structure.
692 */
693 static void wake_call_rcu_thread(struct call_rcu_data *crdp)
694 {
695 if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
696 call_rcu_wake_up(crdp);
697 }
698
699 static void _call_rcu(struct rcu_head *head,
700 void (*func)(struct rcu_head *head),
701 struct call_rcu_data *crdp)
702 {
703 cds_wfcq_node_init(&head->next);
704 head->func = func;
705 cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
706 uatomic_inc(&crdp->qlen);
707 wake_call_rcu_thread(crdp);
708 }
709
710 /*
711 * Schedule a function to be invoked after a following grace period.
712 * This is the only function that must be called -- the others are
713 * only present to allow applications to tune their use of RCU for
714 * maximum performance.
715 *
716 * Note that unless a call_rcu thread has already been created,
717 * the first invocation of call_rcu() will create one. So, if you
718 * need the first invocation of call_rcu() to be fast, make sure
719 * to create a call_rcu thread first. One way to accomplish this is
720 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
721 *
722 * call_rcu must be called by registered RCU read-side threads.
723 */
724 void call_rcu(struct rcu_head *head,
725 void (*func)(struct rcu_head *head))
726 {
727 struct call_rcu_data *crdp;
728
729 /* Holding rcu read-side lock across use of per-cpu crdp */
730 _rcu_read_lock();
731 crdp = get_call_rcu_data();
732 _call_rcu(head, func, crdp);
733 _rcu_read_unlock();
734 }
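
/*
 * Illustrative usage sketch (not part of the original file): embed a
 * struct rcu_head in the protected object and free it from the callback.
 * "struct foo", "foo_free_cb" and "remove_foo" are hypothetical names;
 * the calling thread is assumed to be registered with rcu_register_thread().
 *
 *	struct foo {
 *		int value;
 *		struct rcu_head rcu_head;
 *	};
 *
 *	static void foo_free_cb(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu_head);
 *
 *		free(p);
 *	}
 *
 *	static void remove_foo(struct foo *p)
 *	{
 *		// unlink p from its data structure first, then:
 *		call_rcu(&p->rcu_head, foo_free_cb);
 *	}
 */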
735
736 /*
737 * Free up the specified call_rcu_data structure, terminating the
738 * associated call_rcu thread. The caller must have previously
739 * removed the call_rcu_data structure from per-thread or per-CPU
740 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
741 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
742 * per-thread call_rcu_data structures.
743 *
744 * We silently refuse to free up the default call_rcu_data structure
745 * because that is where we put any leftover callbacks. Note that
746 * the possibility of self-spawning callbacks makes it impossible
747 * to execute all the callbacks in finite time without putting any
748 * newly spawned callbacks somewhere else. The "somewhere else" of
749 * last resort is the default call_rcu_data structure.
750 *
751 * We also silently refuse to free NULL pointers. This simplifies
752 * the calling code.
753 *
754 * The caller must wait for a grace-period to pass between return from
755 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
756 * previous call rcu data as argument.
757 *
758 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
759 * a list corruption bug in the 0.7.x series. The equivalent fix
760 * appeared in 0.6.8 for the stable-0.6 branch.
761 */
762 static
763 void _call_rcu_data_free(struct call_rcu_data *crdp, unsigned int flags)
764 {
765 if (crdp == NULL || crdp == default_call_rcu_data) {
766 return;
767 }
768 if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
769 uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
770 wake_call_rcu_thread(crdp);
771 while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
772 (void) poll(NULL, 0, 1);
773 }
774 call_rcu_lock(&call_rcu_mutex);
775 if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
776 call_rcu_unlock(&call_rcu_mutex);
777 /* Create default call rcu data if need be. */
778 /* CBs queued here will be handed to the default list. */
779 (void) get_default_call_rcu_data();
780 call_rcu_lock(&call_rcu_mutex);
781 __cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
782 &default_call_rcu_data->cbs_tail,
783 &crdp->cbs_head, &crdp->cbs_tail);
784 uatomic_add(&default_call_rcu_data->qlen,
785 uatomic_read(&crdp->qlen));
786 wake_call_rcu_thread(default_call_rcu_data);
787 }
788
789 cds_list_del(&crdp->list);
790 call_rcu_unlock(&call_rcu_mutex);
791
792 if (flags & CRDF_FLAG_JOIN_THREAD) {
793 int ret;
794
795 ret = pthread_join(get_call_rcu_thread(crdp), NULL);
796 if (ret)
797 urcu_die(ret);
798 }
799 free(crdp);
800 }
801
802 void call_rcu_data_free(struct call_rcu_data *crdp)
803 {
804 _call_rcu_data_free(crdp, CRDF_FLAG_JOIN_THREAD);
805 }
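
/*
 * Illustrative sketch (not part of the original file): tearing down a
 * per-thread call_rcu_data created earlier with create_call_rcu_data().
 * Leftover callbacks are spliced onto the default queue by
 * _call_rcu_data_free() above, so none are lost. "my_crdp" is hypothetical.
 *
 *	struct call_rcu_data *my_crdp;
 *
 *	my_crdp = get_thread_call_rcu_data();
 *	if (my_crdp != NULL) {
 *		set_thread_call_rcu_data(NULL);
 *		call_rcu_data_free(my_crdp);
 *	}
 */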
806
807 /*
808 * Clean up all the per-CPU call_rcu threads.
809 */
810 void free_all_cpu_call_rcu_data(void)
811 {
812 int cpu;
813 struct call_rcu_data **crdp;
814 static int warned = 0;
815
816 if (cpus_array_len <= 0)
817 return;
818
819 crdp = malloc(sizeof(*crdp) * cpus_array_len);
820 if (!crdp) {
821 if (!warned) {
822 fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
823 }
824 warned = 1;
825 return;
826 }
827
828 for (cpu = 0; cpu < cpus_array_len; cpu++) {
829 crdp[cpu] = get_cpu_call_rcu_data(cpu);
830 if (crdp[cpu] == NULL)
831 continue;
832 set_cpu_call_rcu_data(cpu, NULL);
833 }
834 /*
835 * Wait for call_rcu sites acting as RCU readers of the
836 * call_rcu_data to become quiescent.
837 */
838 synchronize_rcu();
839 for (cpu = 0; cpu < cpus_array_len; cpu++) {
840 if (crdp[cpu] == NULL)
841 continue;
842 call_rcu_data_free(crdp[cpu]);
843 }
844 free(crdp);
845 }
846
847 static
848 void free_completion(struct urcu_ref *ref)
849 {
850 struct call_rcu_completion *completion;
851
852 completion = caa_container_of(ref, struct call_rcu_completion, ref);
853 free(completion);
854 }
855
856 static
857 void _rcu_barrier_complete(struct rcu_head *head)
858 {
859 struct call_rcu_completion_work *work;
860 struct call_rcu_completion *completion;
861
862 work = caa_container_of(head, struct call_rcu_completion_work, head);
863 completion = work->completion;
864 if (!uatomic_sub_return(&completion->barrier_count, 1))
865 call_rcu_completion_wake_up(completion);
866 urcu_ref_put(&completion->ref, free_completion);
867 free(work);
868 }
869
870 /*
871 * Wait for all in-flight call_rcu callbacks to complete execution.
872 */
873 void rcu_barrier(void)
874 {
875 struct call_rcu_data *crdp;
876 struct call_rcu_completion *completion;
877 int count = 0;
878 int was_online;
879
880 /* Put in offline state in QSBR. */
881 was_online = _rcu_read_ongoing();
882 if (was_online)
883 rcu_thread_offline();
884 /*
885 * Calling rcu_barrier() from within an RCU read-side critical
886 * section is an error.
887 */
888 if (_rcu_read_ongoing()) {
889 static int warned = 0;
890
891 if (!warned) {
892 fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
893 }
894 warned = 1;
895 goto online;
896 }
897
898 completion = calloc(1, sizeof(*completion));
899 if (!completion)
900 urcu_die(errno);
901
902 call_rcu_lock(&call_rcu_mutex);
903 cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
904 count++;
905
906 /* Referenced by rcu_barrier() and each call_rcu thread. */
907 urcu_ref_set(&completion->ref, count + 1);
908 completion->barrier_count = count;
909
910 cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
911 struct call_rcu_completion_work *work;
912
913 work = calloc(1, sizeof(*work));
914 if (!work)
915 urcu_die(errno);
916 work->completion = completion;
917 _call_rcu(&work->head, _rcu_barrier_complete, crdp);
918 }
919 call_rcu_unlock(&call_rcu_mutex);
920
921 /* Wait for them */
922 for (;;) {
923 uatomic_dec(&completion->futex);
924 /* Decrement futex before reading barrier_count */
925 cmm_smp_mb();
926 if (!uatomic_read(&completion->barrier_count))
927 break;
928 call_rcu_completion_wait(completion);
929 }
930
931 urcu_ref_put(&completion->ref, free_completion);
932
933 online:
934 if (was_online)
935 rcu_thread_online();
936 }
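
/*
 * Illustrative sketch (not part of the original file): a shutdown sequence
 * that drains pending callbacks before releasing resources. It assumes no
 * other thread keeps enqueueing callbacks concurrently, since rcu_barrier()
 * only waits for callbacks already queued.
 *
 *	// stop producing new call_rcu() callbacks, then:
 *	rcu_barrier();
 *	free_all_cpu_call_rcu_data();
 */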
937
938 /*
939 * Acquire the call_rcu_mutex in order to ensure that the child sees
940 * all of the call_rcu() data structures in a consistent state. Ensure
941 * that all call_rcu threads are in a quiescent state across fork.
942 * Suitable for pthread_atfork() and friends.
943 */
944 void call_rcu_before_fork(void)
945 {
946 struct call_rcu_data *crdp;
947 struct urcu_atfork *atfork;
948
949 call_rcu_lock(&call_rcu_mutex);
950
951 atfork = registered_rculfhash_atfork;
952 if (atfork)
953 atfork->before_fork(atfork->priv);
954
955 cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
956 uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
957 cmm_smp_mb__after_uatomic_or();
958 wake_call_rcu_thread(crdp);
959 }
960 cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
961 while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
962 (void) poll(NULL, 0, 1);
963 }
964 }
965
966 /*
967 * Clean up call_rcu data structures in the parent of a successful fork()
968 * that is not followed by exec() in the child. Suitable for
969 * pthread_atfork() and friends.
970 */
971 void call_rcu_after_fork_parent(void)
972 {
973 struct call_rcu_data *crdp;
974 struct urcu_atfork *atfork;
975
976 cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
977 uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
978 cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
979 while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
980 (void) poll(NULL, 0, 1);
981 }
982 atfork = registered_rculfhash_atfork;
983 if (atfork)
984 atfork->after_fork_parent(atfork->priv);
985 call_rcu_unlock(&call_rcu_mutex);
986 }
987
988 /*
989 * Clean up call_rcu data structures in the child of a successful fork()
990 * that is not followed by exec(). Suitable for pthread_atfork() and
991 * friends.
992 */
993 void call_rcu_after_fork_child(void)
994 {
995 struct call_rcu_data *crdp, *next;
996 struct urcu_atfork *atfork;
997
998 /* Release the mutex. */
999 call_rcu_unlock(&call_rcu_mutex);
1000
1001 atfork = registered_rculfhash_atfork;
1002 if (atfork)
1003 atfork->after_fork_child(atfork->priv);
1004
1005 /* Do nothing when call_rcu() has not been used */
1006 if (cds_list_empty(&call_rcu_data_list))
1007 return;
1008
1009 /*
1010 * Allocate a new default call_rcu_data structure in order
1011 * to get a working call_rcu thread to go with it.
1012 */
1013 default_call_rcu_data = NULL;
1014 (void)get_default_call_rcu_data();
1015
1016 /* Cleanup call_rcu_data pointers before use */
1017 cpus_array_len_reset();
1018 free(per_cpu_call_rcu_data);
1019 rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
1020 URCU_TLS(thread_call_rcu_data) = NULL;
1021
1022 /*
1023 * Dispose of all of the rest of the call_rcu_data structures.
1024 * Leftover call_rcu callbacks will be merged into the new
1025 * default call_rcu thread queue.
1026 */
1027 cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
1028 if (crdp == default_call_rcu_data)
1029 continue;
1030 uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
1031 /*
1032 * Do not join the thread because it does not exist in
1033 * the child.
1034 */
1035 _call_rcu_data_free(crdp, 0);
1036 }
1037 }
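
/*
 * Illustrative sketch (not part of the original file): wiring the three
 * handlers above into pthread_atfork(), as suggested by their comments, so
 * that call_rcu threads are quiescent across fork() in a multithreaded
 * application.
 *
 *	ret = pthread_atfork(call_rcu_before_fork,
 *			call_rcu_after_fork_parent,
 *			call_rcu_after_fork_child);
 *	if (ret)
 *		urcu_die(ret);
 */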
1038
1039 void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
1040 {
1041 if (CMM_LOAD_SHARED(registered_rculfhash_atfork))
1042 return;
1043 call_rcu_lock(&call_rcu_mutex);
1044 if (!registered_rculfhash_atfork)
1045 registered_rculfhash_atfork = atfork;
1046 call_rcu_unlock(&call_rcu_mutex);
1047 }
1048
1049 /*
1050 * This unregistration function is deprecated, meant only for internal
1051 * use by rculfhash.
1052 */
1053 __attribute__((noreturn))
1054 void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
1055 {
1056 urcu_die(EPERM);
1057 }