Allow forcing the use of sys membarrier
[urcu.git] / src / urcu-call-rcu-impl.h

/*
 * urcu-call-rcu.c
 *
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "compat-getcpu.h"
#include "urcu/wfcqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu/ref.h"
#include "urcu-die.h"

#define SET_AFFINITY_CHECK_PERIOD		(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK		(SET_AFFINITY_CHECK_PERIOD - 1)

/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long gp_count;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct call_rcu_completion_work {
	struct rcu_head head;
	struct call_rcu_completion *completion;
};

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

static CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);

/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID))

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as an
 * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
 * without holding the mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

static void maxcpus_reset(void)
{
	maxcpus = 0;
}

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		return;
	}
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}

#else /* #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below,
 * used both when the CPU number is available and when it is not,
 * assume it is not constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void maxcpus_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}

/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;
	int ret;

	if (crdp->cpu_affinity < 0)
		return 0;
	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == crdp->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	ret = sched_setaffinity(0, &mask);
#else
	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) != -1)
		return;
	while (futex_async(&crdp->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

static void call_rcu_completion_wait(struct call_rcu_completion *completion)
{
	/* Read completion barrier count before read futex */
	cmm_smp_mb();
	if (uatomic_read(&completion->futex) != -1)
		return;
	while (futex_async(&completion->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
{
	/* Write to completion barrier count before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
		uatomic_set(&completion->futex, 0);
		if (futex_async(&completion->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp))
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(crdp))
			urcu_die(errno);

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
		unsigned long flags,
		int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	crdp->gp_count = 0;
	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
		int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
		int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
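
/*
 * Illustrative sketch (not part of this file), assuming an application
 * wants a dedicated worker for its own callbacks, e.g. a real-time one:
 *
 *	struct call_rcu_data *my_crdp;
 *
 *	my_crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(my_crdp);
 *
 * and later, once this thread no longer queues callbacks:
 *
 *	set_thread_call_rcu_data(NULL);
 *	call_rcu_data_free(my_crdp);
 */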

/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by another thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
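
/*
 * Illustrative sketch (not part of this file): an application that wants
 * per-CPU callback handling from the start, rather than lazily creating
 * the default call_rcu thread on first use, could run this at startup:
 *
 *	if (create_all_cpu_call_rcu_data(0))
 *		perror("create_all_cpu_call_rcu_data");
 *
 * paired with free_all_cpu_call_rcu_data() at teardown.
 */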

/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

static void _call_rcu(struct rcu_head *head,
		void (*func)(struct rcu_head *head),
		struct call_rcu_data *crdp)
{
	cds_wfcq_node_init(&head->next);
	head->func = func;
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}

/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created,
 * the first invocation of call_rcu() will create one. So, if you
 * need the first invocation of call_rcu() to be fast, make sure
 * to create a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	/* Holding rcu read-side lock across use of per-cpu crdp */
	_rcu_read_lock();
	crdp = get_call_rcu_data();
	_call_rcu(head, func, crdp);
	_rcu_read_unlock();
}
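
/*
 * Illustrative usage sketch (not part of this file): rcu_head is typically
 * embedded in the structure being reclaimed, and the callback recovers the
 * enclosing structure with caa_container_of(). The names below (struct foo,
 * free_foo, removed_foo) are hypothetical.
 *
 *	struct foo {
 *		int value;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu);
 *
 *		free(p);
 *	}
 *
 * After unlinking removed_foo from the data structure, from a thread
 * registered with rcu_register_thread():
 *
 *	call_rcu(&removed_foo->rcu, free_foo);
 */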

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			(void) poll(NULL, 0, 1);
	}
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		/* Create default call rcu data if need be */
		(void) get_default_call_rcu_data();
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	call_rcu_lock(&call_rcu_mutex);
	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}
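
/*
 * Illustrative sketch (not part of this file) of retiring one per-CPU
 * call_rcu_data structure while respecting the grace-period requirement
 * documented above:
 *
 *	struct call_rcu_data *crdp = get_cpu_call_rcu_data(cpu);
 *
 *	set_cpu_call_rcu_data(cpu, NULL);
 *	synchronize_rcu();	(wait for pending users of the old pointer)
 *	call_rcu_data_free(crdp);
 */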

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (maxcpus <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * maxcpus);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < maxcpus; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}

static
void free_completion(struct urcu_ref *ref)
{
	struct call_rcu_completion *completion;

	completion = caa_container_of(ref, struct call_rcu_completion, ref);
	free(completion);
}

static
void _rcu_barrier_complete(struct rcu_head *head)
{
	struct call_rcu_completion_work *work;
	struct call_rcu_completion *completion;

	work = caa_container_of(head, struct call_rcu_completion_work, head);
	completion = work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		call_rcu_completion_wake_up(completion);
	urcu_ref_put(&completion->ref, free_completion);
	free(work);
}

/*
 * Wait for all in-flight call_rcu callbacks to complete execution.
 */
void rcu_barrier(void)
{
	struct call_rcu_data *crdp;
	struct call_rcu_completion *completion;
	int count = 0;
	int was_online;

	/* Put in offline state in QSBR. */
	was_online = _rcu_read_ongoing();
	if (was_online)
		rcu_thread_offline();
	/*
	 * Calling rcu_barrier() within an RCU read-side critical
	 * section is an error.
	 */
	if (_rcu_read_ongoing()) {
		static int warned = 0;

		if (!warned) {
			fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
		}
		warned = 1;
		goto online;
	}

	completion = calloc(sizeof(*completion), 1);
	if (!completion)
		urcu_die(errno);

	call_rcu_lock(&call_rcu_mutex);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		count++;

	/* Referenced by rcu_barrier() and each call_rcu thread. */
	urcu_ref_set(&completion->ref, count + 1);
	completion->barrier_count = count;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		struct call_rcu_completion_work *work;

		work = calloc(sizeof(*work), 1);
		if (!work)
			urcu_die(errno);
		work->completion = completion;
		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
	}
	call_rcu_unlock(&call_rcu_mutex);

	/* Wait for them */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		call_rcu_completion_wait(completion);
	}

	urcu_ref_put(&completion->ref, free_completion);

online:
	if (was_online)
		rcu_thread_online();
}
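
/*
 * Illustrative sketch (not part of this file): before tearing down state
 * that pending callbacks may still reference (e.g. unloading a plugin that
 * provides the callback function), drain the queues first:
 *
 *	call_rcu(&p->rcu, free_foo);	(p and free_foo are hypothetical)
 *	...
 *	rcu_barrier();
 *
 * After rcu_barrier() returns, all previously queued callbacks have run.
 */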

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			(void) poll(NULL, 0, 1);
	}
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
			(void) poll(NULL, 0, 1);
	}
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	maxcpus_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
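
/*
 * Illustrative sketch (not part of this file): an application that forks
 * without exec'ing can register these handlers once at startup:
 *
 *	if (pthread_atfork(call_rcu_before_fork,
 *			call_rcu_after_fork_parent,
 *			call_rcu_after_fork_child))
 *		abort();
 */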