[urcu.git] / src / urcu-call-rcu-impl.h
/*
 * urcu-call-rcu.c
 *
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "compat-getcpu.h"
#include <urcu/wfcqueue.h>
#include <urcu/call-rcu.h>
#include <urcu/pointer.h>
#include <urcu/list.h>
#include <urcu/futex.h>
#include <urcu/tls-compat.h>
#include <urcu/ref.h>
#include "urcu-die.h"
#include "urcu-utils.h"
#include "compat-smp.h"

#define SET_AFFINITY_CHECK_PERIOD	(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK	(SET_AFFINITY_CHECK_PERIOD - 1)

/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long gp_count;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct call_rcu_completion_work {
	struct rcu_head head;
	struct call_rcu_completion *completion;
};

enum crdf_flags {
	CRDF_FLAG_JOIN_THREAD = (1 << 0),
};

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

static CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);

/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;

static struct urcu_atfork *registered_rculfhash_atfork;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID))

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as an
 * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
 * without mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long cpus_array_len;

static void cpus_array_len_reset(void)
{
	cpus_array_len = 0;
}

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (cpus_array_len != 0)
		return;
	cpus_array_len = get_possible_cpus_array_len();
	if (cpus_array_len <= 0) {
		return;
	}
	p = malloc(cpus_array_len * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', cpus_array_len * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}

#else /* #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below, used both
 * for cases where the cpu number is available and not available, assume it is
 * not constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long cpus_array_len = -1;

static void cpus_array_len_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}

/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#ifdef HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;
	int ret;

	if (crdp->cpu_affinity < 0)
		return 0;
	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == crdp->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	ret = sched_setaffinity(0, &mask);
#else
	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp __attribute__((unused)))
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	while (uatomic_read(&crdp->futex) == -1) {
		if (!futex_async(&crdp->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
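
/*
 * Sketch of the futex handshake implemented by call_rcu_wait() and
 * call_rcu_wake_up() above (illustrative only, not part of the
 * original file):
 *
 *	waiter (call_rcu thread)	waker (call_rcu() caller)
 *	------------------------	-------------------------
 *	uatomic_dec(&futex); -> -1	enqueue callback
 *	cmm_smp_mb();			cmm_smp_mb();
 *	re-check callback queue		if (futex == -1) {
 *	futex_async(FUTEX_WAIT, -1);		uatomic_set(&futex, 0);
 *						futex_async(FUTEX_WAKE, 1);
 *					}
 *
 * The paired barriers guarantee that a waiter which published the -1
 * value sees any callback enqueued before the matching wakeup, and that
 * a waker never misses a sleeping waiter: either the waker observes -1
 * and issues FUTEX_WAKE, or the waiter observes a non-empty queue (or a
 * futex value that already changed, hence EAGAIN) and does not block.
 */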

static void call_rcu_completion_wait(struct call_rcu_completion *completion)
{
	/* Read completion barrier count before read futex */
	cmm_smp_mb();
	while (uatomic_read(&completion->futex) == -1) {
		if (!futex_async(&completion->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
{
	/* Write to completion barrier count before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
		uatomic_set(&completion->futex, 0);
		if (futex_async(&completion->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp))
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(crdp))
			urcu_die(errno);

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourselves from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	crdp->gp_count = 0;
	cmm_smp_mb();	/* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && cpus_array_len > 0 && (cpu < 0 || cpus_array_len <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || cpus_array_len <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}
URCU_ATTR_ALIAS(urcu_stringify(get_cpu_call_rcu_data))
struct call_rcu_data *alias_get_cpu_call_rcu_data();

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}
URCU_ATTR_ALIAS(urcu_stringify(get_call_rcu_thread))
pthread_t alias_get_call_rcu_thread();

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

URCU_ATTR_ALIAS(urcu_stringify(create_call_rcu_data))
struct call_rcu_data *alias_create_call_rcu_data();
struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || cpus_array_len <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
URCU_ATTR_ALIAS(urcu_stringify(set_cpu_call_rcu_data))
int alias_set_cpu_call_rcu_data();
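
/*
 * Example (sketch, not in the original source): detaching and disposing
 * of one CPU's call_rcu_data, honoring the grace-period requirement
 * stated above. The variable names are illustrative.
 *
 *	struct call_rcu_data *old;
 *
 *	rcu_read_lock();
 *	old = get_cpu_call_rcu_data(cpu);
 *	rcu_read_unlock();
 *	if (old != NULL && set_cpu_call_rcu_data(cpu, NULL) == 0) {
 *		synchronize_rcu();	-- wait for readers of "old"
 *		call_rcu_data_free(old);
 *	}
 */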

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}
URCU_ATTR_ALIAS(urcu_stringify(get_default_call_rcu_data))
struct call_rcu_data *alias_get_default_call_rcu_data();

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (cpus_array_len > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}
URCU_ATTR_ALIAS(urcu_stringify(get_call_rcu_data))
struct call_rcu_data *alias_get_call_rcu_data();

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}
URCU_ATTR_ALIAS(urcu_stringify(get_thread_call_rcu_data))
struct call_rcu_data *alias_get_thread_call_rcu_data();

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
URCU_ATTR_ALIAS(urcu_stringify(set_thread_call_rcu_data))
void alias_set_thread_call_rcu_data();
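
/*
 * Example (sketch, not in the original source): giving the current
 * thread a dedicated real-time call_rcu worker, then reverting to the
 * shared default. Callbacks still pending on the private queue are
 * handed off to the default queue by call_rcu_data_free().
 *
 *	struct call_rcu_data *my_crdp;
 *
 *	my_crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(my_crdp);
 *	... this thread's call_rcu() invocations now use my_crdp ...
 *	set_thread_call_rcu_data(NULL);
 *	call_rcu_data_free(my_crdp);
 */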

/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (cpus_array_len <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < cpus_array_len; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* It has been created by another thread. */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
URCU_ATTR_ALIAS(urcu_stringify(create_all_cpu_call_rcu_data))
int alias_create_all_cpu_call_rcu_data();
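
/*
 * Example (sketch, not in the original source): typical setup and
 * teardown when pre-spawning per-CPU call_rcu workers at application
 * start.
 *
 *	if (create_all_cpu_call_rcu_data(0) != 0)
 *		perror("create_all_cpu_call_rcu_data");
 *	... application threads run and invoke call_rcu() ...
 *	free_all_cpu_call_rcu_data();
 */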

/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

static void _call_rcu(struct rcu_head *head,
		      void (*func)(struct rcu_head *head),
		      struct call_rcu_data *crdp)
{
	cds_wfcq_node_init(&head->next);
	head->func = func;
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}

/*
 * Schedule a function to be invoked after a subsequent grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	/* Holding rcu read-side lock across use of per-cpu crdp */
	_rcu_read_lock();
	crdp = get_call_rcu_data();
	_call_rcu(head, func, crdp);
	_rcu_read_unlock();
}
URCU_ATTR_ALIAS(urcu_stringify(call_rcu)) void alias_call_rcu();

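/*
 * Example (sketch, not in the original source): the usual embedding
 * pattern for call_rcu(). The type "struct foo", the global pointer
 * "gp" and the callback "free_foo" are illustrative only.
 *
 *	struct foo {
 *		struct rcu_head rcu_head;
 *		int value;
 *	};
 *
 *	static void free_foo(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head,
 *			struct foo, rcu_head);
 *
 *		free(p);
 *	}
 *
 *	old = rcu_xchg_pointer(&gp, new);	-- unpublish old version
 *	call_rcu(&old->rcu_head, free_foo);	-- reclaim after grace period
 */
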
/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
static
void _call_rcu_data_free(struct call_rcu_data *crdp, unsigned int flags)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			(void) poll(NULL, 0, 1);
	}
	call_rcu_lock(&call_rcu_mutex);
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		call_rcu_unlock(&call_rcu_mutex);
		/* Create default call rcu data if need be. */
		/* CBs queued here will be handed to the default list. */
		(void) get_default_call_rcu_data();
		call_rcu_lock(&call_rcu_mutex);
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	if (flags & CRDF_FLAG_JOIN_THREAD) {
		int ret;

		ret = pthread_join(get_call_rcu_thread(crdp), NULL);
		if (ret)
			urcu_die(ret);
	}
	free(crdp);
}
URCU_ATTR_ALIAS(urcu_stringify(call_rcu_data_free))
void alias_call_rcu_data_free();

void call_rcu_data_free(struct call_rcu_data *crdp)
{
	_call_rcu_data_free(crdp, CRDF_FLAG_JOIN_THREAD);
}

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (cpus_array_len <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * cpus_array_len);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < cpus_array_len; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < cpus_array_len; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}
#ifdef RCU_QSBR
/* ABI6 has a non-namespaced free_all_cpu_call_rcu_data for qsbr */
#undef free_all_cpu_call_rcu_data
URCU_ATTR_ALIAS("urcu_qsbr_free_all_cpu_call_rcu_data")
void free_all_cpu_call_rcu_data();
#define free_all_cpu_call_rcu_data urcu_qsbr_free_all_cpu_call_rcu_data
#else
URCU_ATTR_ALIAS(urcu_stringify(free_all_cpu_call_rcu_data))
void alias_free_all_cpu_call_rcu_data();
#endif

static
void free_completion(struct urcu_ref *ref)
{
	struct call_rcu_completion *completion;

	completion = caa_container_of(ref, struct call_rcu_completion, ref);
	free(completion);
}

static
void _rcu_barrier_complete(struct rcu_head *head)
{
	struct call_rcu_completion_work *work;
	struct call_rcu_completion *completion;

	work = caa_container_of(head, struct call_rcu_completion_work, head);
	completion = work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		call_rcu_completion_wake_up(completion);
	urcu_ref_put(&completion->ref, free_completion);
	free(work);
}

/*
 * Wait for all in-flight call_rcu callbacks to complete execution.
 */
void rcu_barrier(void)
{
	struct call_rcu_data *crdp;
	struct call_rcu_completion *completion;
	int count = 0;
	int was_online;

	/* Put in offline state in QSBR. */
	was_online = _rcu_read_ongoing();
	if (was_online)
		rcu_thread_offline();
	/*
	 * Calling rcu_barrier() within an RCU read-side critical
	 * section is an error.
	 */
	if (_rcu_read_ongoing()) {
		static int warned = 0;

		if (!warned) {
			fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
		}
		warned = 1;
		goto online;
	}

	completion = calloc(1, sizeof(*completion));
	if (!completion)
		urcu_die(errno);

	call_rcu_lock(&call_rcu_mutex);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		count++;

	/* Referenced by rcu_barrier() and each call_rcu thread. */
	urcu_ref_set(&completion->ref, count + 1);
	completion->barrier_count = count;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		struct call_rcu_completion_work *work;

		work = calloc(1, sizeof(*work));
		if (!work)
			urcu_die(errno);
		work->completion = completion;
		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
	}
	call_rcu_unlock(&call_rcu_mutex);

	/* Wait for them */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		call_rcu_completion_wait(completion);
	}

	urcu_ref_put(&completion->ref, free_completion);

online:
	if (was_online)
		rcu_thread_online();
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_barrier))
void alias_rcu_barrier();
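
/*
 * Example (sketch, not in the original source): draining all pending
 * callbacks before process exit or library unload, from a thread that
 * is not within an RCU read-side critical section.
 *
 *	call_rcu(&old->rcu_head, free_foo);
 *	...
 *	rcu_barrier();	-- all previously queued callbacks have now run
 */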

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	call_rcu_lock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->before_fork(atfork->priv);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			(void) poll(NULL, 0, 1);
	}
}
URCU_ATTR_ALIAS(urcu_stringify(call_rcu_before_fork))
void alias_call_rcu_before_fork();

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
			(void) poll(NULL, 0, 1);
	}
	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_parent(atfork->priv);
	call_rcu_unlock(&call_rcu_mutex);
}
URCU_ATTR_ALIAS(urcu_stringify(call_rcu_after_fork_parent))
void alias_call_rcu_after_fork_parent();

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;
	struct urcu_atfork *atfork;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_child(atfork->priv);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	cpus_array_len_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		/*
		 * Do not join the thread because it does not exist in
		 * the child.
		 */
		_call_rcu_data_free(crdp, 0);
	}
}
URCU_ATTR_ALIAS(urcu_stringify(call_rcu_after_fork_child))
void alias_call_rcu_after_fork_child();
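
/*
 * Example (sketch, not in the original source): wiring the three
 * handlers above into pthread_atfork(), so that fork() not followed by
 * exec() stays safe in programs using call_rcu().
 *
 *	ret = pthread_atfork(call_rcu_before_fork,
 *			call_rcu_after_fork_parent,
 *			call_rcu_after_fork_child);
 *	if (ret)
 *		urcu_die(ret);
 */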

void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
{
	if (CMM_LOAD_SHARED(registered_rculfhash_atfork))
		return;
	call_rcu_lock(&call_rcu_mutex);
	if (!registered_rculfhash_atfork)
		registered_rculfhash_atfork = atfork;
	call_rcu_unlock(&call_rcu_mutex);
}
URCU_ATTR_ALIAS(urcu_stringify(urcu_register_rculfhash_atfork))
void alias_urcu_register_rculfhash_atfork();

/*
 * This unregistration function is deprecated, meant only for internal
 * use by rculfhash.
 */
__attribute__((noreturn))
void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
{
	urcu_die(EPERM);
}
URCU_ATTR_ALIAS(urcu_stringify(urcu_unregister_rculfhash_atfork))
__attribute__((noreturn))
void alias_urcu_unregister_rculfhash_atfork();