Add `urcu_posix_assert()` as `assert()` replacement
src/urcu-call-rcu-impl.h
/*
 * urcu-call-rcu.c
 *
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "compat-getcpu.h"
#include <urcu/assert.h>
#include <urcu/wfcqueue.h>
#include <urcu/call-rcu.h>
#include <urcu/pointer.h>
#include <urcu/list.h>
#include <urcu/futex.h>
#include <urcu/tls-compat.h>
#include <urcu/ref.h>
#include "urcu-die.h"
#include "urcu-utils.h"

#define SET_AFFINITY_CHECK_PERIOD	(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK	(SET_AFFINITY_CHECK_PERIOD - 1)

/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long gp_count;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct call_rcu_completion_work {
	struct rcu_head head;
	struct call_rcu_completion *completion;
};

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

static CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);

/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;

static struct urcu_atfork *registered_rculfhash_atfork;
static unsigned long registered_rculfhash_atfork_refcount;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID))

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as an
 * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
 * without mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

static void maxcpus_reset(void)
{
	maxcpus = 0;
}

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		return;
	}
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}

#else /* #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below, used both
 * for cases where cpu number is available and not available, assume it is not
 * constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void maxcpus_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}

/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#ifdef HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;
	int ret;

	if (crdp->cpu_affinity < 0)
		return 0;
	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == crdp->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);

	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp __attribute__((unused)))
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) != -1)
		return;
	while (futex_async(&crdp->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

static void call_rcu_completion_wait(struct call_rcu_completion *completion)
{
	/* Read completion barrier count before read futex */
	cmm_smp_mb();
	if (uatomic_read(&completion->futex) != -1)
		return;
	while (futex_async(&completion->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
{
	/* Write to completion barrier count before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
		uatomic_set(&completion->futex, 0);
		if (futex_async(&completion->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp))
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(crdp))
			urcu_die(errno);

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		urcu_posix_assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		urcu_posix_assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	crdp->gp_count = 0;
	cmm_smp_mb();	/* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
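
/*
 * Illustrative sketch of the protocol above, compiled out: detach the
 * old structure, wait a grace period, install a replacement, then free
 * the old one. The helper name is hypothetical and a single updater is
 * assumed; the grace period between detaching the old structure and
 * freeing it is the part that is mandatory.
 */
#if 0	/* example only */
static void example_replace_cpu_call_rcu_data(int cpu)
{
	struct call_rcu_data *old_crdp, *new_crdp;

	old_crdp = get_cpu_call_rcu_data(cpu);
	if (set_cpu_call_rcu_data(cpu, NULL))
		return;
	synchronize_rcu();	/* wait out readers of old_crdp */
	new_crdp = create_call_rcu_data(0, cpu);
	(void) set_cpu_call_rcu_data(cpu, new_crdp);
	call_rcu_data_free(old_crdp);	/* leftover callbacks are merged */
}
#endif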

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}

/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by another thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
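
/*
 * Illustrative sketch, compiled out: typical application startup and
 * teardown around the per-CPU worker pool. Error handling is reduced
 * to a message; the function name is hypothetical.
 */
#if 0	/* example only */
static void example_per_cpu_workers(void)
{
	if (create_all_cpu_call_rcu_data(0))
		fprintf(stderr, "spawning per-CPU call_rcu workers failed: %s\n",
			strerror(errno));
	/* ... call_rcu() now routes through per-CPU workers ... */
	free_all_cpu_call_rcu_data();
}
#endif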

/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

static void _call_rcu(struct rcu_head *head,
		      void (*func)(struct rcu_head *head),
		      struct call_rcu_data *crdp)
{
	cds_wfcq_node_init(&head->next);
	head->func = func;
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}

/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you
 * need the first invocation of call_rcu() to be fast, make sure
 * to create a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	/* Holding rcu read-side lock across use of per-cpu crdp */
	_rcu_read_lock();
	crdp = get_call_rcu_data();
	_call_rcu(head, func, crdp);
	_rcu_read_unlock();
}
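
/*
 * Illustrative sketch, compiled out: the usual call_rcu() pattern is to
 * embed the struct rcu_head inside the object being reclaimed and free
 * the enclosing object from the callback. "struct foo" and the helper
 * names are hypothetical; the calling thread must be a registered RCU
 * read-side thread.
 */
#if 0	/* example only */
struct foo {
	int value;
	struct rcu_head rcu;
};

static void free_foo(struct rcu_head *head)
{
	struct foo *p = caa_container_of(head, struct foo, rcu);

	free(p);
}

static void example_retire_foo(struct foo *p)
{
	/* p must already be unreachable from new readers. */
	call_rcu(&p->rcu, free_foo);
}
#endif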

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			(void) poll(NULL, 0, 1);
	}
	call_rcu_lock(&call_rcu_mutex);
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		call_rcu_unlock(&call_rcu_mutex);
		/* Create default call rcu data if need be. */
		/* CBs queued here will be handed to the default list. */
		(void) get_default_call_rcu_data();
		call_rcu_lock(&call_rcu_mutex);
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (maxcpus <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * maxcpus);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < maxcpus; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}

static
void free_completion(struct urcu_ref *ref)
{
	struct call_rcu_completion *completion;

	completion = caa_container_of(ref, struct call_rcu_completion, ref);
	free(completion);
}

static
void _rcu_barrier_complete(struct rcu_head *head)
{
	struct call_rcu_completion_work *work;
	struct call_rcu_completion *completion;

	work = caa_container_of(head, struct call_rcu_completion_work, head);
	completion = work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		call_rcu_completion_wake_up(completion);
	urcu_ref_put(&completion->ref, free_completion);
	free(work);
}

/*
 * Wait for all in-flight call_rcu callbacks to complete execution.
 */
void rcu_barrier(void)
{
	struct call_rcu_data *crdp;
	struct call_rcu_completion *completion;
	int count = 0;
	int was_online;

	/* Put in offline state in QSBR. */
	was_online = _rcu_read_ongoing();
	if (was_online)
		rcu_thread_offline();
	/*
	 * Calling a rcu_barrier() within a RCU read-side critical
	 * section is an error.
	 */
	if (_rcu_read_ongoing()) {
		static int warned = 0;

		if (!warned) {
			fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
		}
		warned = 1;
		goto online;
	}

	completion = calloc(sizeof(*completion), 1);
	if (!completion)
		urcu_die(errno);

	call_rcu_lock(&call_rcu_mutex);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		count++;

	/* Referenced by rcu_barrier() and each call_rcu thread. */
	urcu_ref_set(&completion->ref, count + 1);
	completion->barrier_count = count;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		struct call_rcu_completion_work *work;

		work = calloc(sizeof(*work), 1);
		if (!work)
			urcu_die(errno);
		work->completion = completion;
		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
	}
	call_rcu_unlock(&call_rcu_mutex);

	/* Wait for them */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		call_rcu_completion_wait(completion);
	}

	urcu_ref_put(&completion->ref, free_completion);

online:
	if (was_online)
		rcu_thread_online();
}
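
/*
 * Illustrative sketch, compiled out: rcu_barrier() is typically used
 * before tearing down state that queued callbacks may still reference,
 * e.g. prior to unloading a shared object. The function name is
 * hypothetical.
 */
#if 0	/* example only */
static void example_teardown(void)
{
	rcu_barrier();	/* every callback queued so far has now run */
	/* Safe to release callback code/data from here on. */
}
#endif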

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	call_rcu_lock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->before_fork(atfork->priv);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			(void) poll(NULL, 0, 1);
	}
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
			(void) poll(NULL, 0, 1);
	}
	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_parent(atfork->priv);
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;
	struct urcu_atfork *atfork;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_child(atfork->priv);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	maxcpus_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
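
/*
 * Illustrative sketch: the three fork handlers above are designed to be
 * registered together, e.g. once at application startup (this is an
 * assumed usage pattern, not code from this file):
 *
 *	ret = pthread_atfork(call_rcu_before_fork,
 *			call_rcu_after_fork_parent,
 *			call_rcu_after_fork_child);
 *	if (ret)
 *		urcu_die(ret);
 */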

void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
{
	call_rcu_lock(&call_rcu_mutex);
	if (registered_rculfhash_atfork_refcount++)
		goto end;
	registered_rculfhash_atfork = atfork;
end:
	call_rcu_unlock(&call_rcu_mutex);
}

void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
{
	call_rcu_lock(&call_rcu_mutex);
	if (--registered_rculfhash_atfork_refcount)
		goto end;
	registered_rculfhash_atfork = NULL;
end:
	call_rcu_unlock(&call_rcu_mutex);
}