Return -EEXIST when the old cpu call_rcu_data has not been removed
[urcu.git] / urcu-call-rcu-impl.h
/*
 * urcu-call-rcu-impl.h
 *
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "config.h"
#include "urcu/wfqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"

/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	struct cds_wfq_queue cbs;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static __thread struct call_rcu_data *thread_call_rcu_data;

/* Guard call_rcu thread creation. */

static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, use this default. */

static struct call_rcu_data *default_call_rcu_data;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		return;
	}
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		per_cpu_call_rcu_data = p;
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}

#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below,
 * used both for cases where the cpu number is and is not available,
 * assume it is not constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void alloc_cpu_call_rcu_data(void)
{
}

static int sched_getcpu(void)
{
	return -1;
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	if (pthread_mutex_lock(pmp) != 0) {
		perror("pthread_mutex_lock");
		exit(-1);
	}
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	if (pthread_mutex_unlock(pmp) != 0) {
		perror("pthread_mutex_unlock");
		exit(-1);
	}
}

#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;

	if (crdp->cpu_affinity < 0)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	return sched_setaffinity(0, &mask);
#else
	return sched_setaffinity(0, sizeof(mask), &mask);
#endif
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) == -1)
		futex_async(&crdp->futex, FUTEX_WAIT, -1,
			    NULL, NULL, 0);
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		futex_async(&crdp->futex, FUTEX_WAKE, 1,
			    NULL, NULL, 0);
	}
}
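
/*
 * Sketch of the futex handshake implemented by the two helpers above
 * (illustration only, not compiled in). The call_rcu thread publishes
 * its intent to sleep by setting crdp->futex to -1 before re-checking
 * its queue; enqueuers restore 0 before issuing FUTEX_WAKE. The paired
 * memory barriers guarantee that either the sleeper sees the freshly
 * enqueued callback or the waker sees the -1 and wakes it:
 *
 *	call_rcu thread			enqueuing thread
 *	---------------			----------------
 *	uatomic_dec(&crdp->futex);	cds_wfq_enqueue(&crdp->cbs, ...);
 *	cmm_smp_mb();			cmm_smp_mb();
 *	re-check queue: empty		if (futex == -1) {
 *	futex(FUTEX_WAIT, -1);			futex = 0;
 *	    ...sleeps until woken		futex(FUTEX_WAKE, 1);
 *					}
 */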

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
	struct rcu_head *rhp;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp) != 0) {
		perror("pthread_setaffinity_np");
		exit(-1);
	}

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	thread_call_rcu_data = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
			while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
				poll(NULL, 0, 1);
			_CMM_STORE_SHARED(crdp->cbs.head, NULL);
			cbs_tail = (struct cds_wfq_node **)
				uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
			synchronize_rcu();
			cbcount = 0;
			do {
				while (cbs->next == NULL &&
				       &cbs->next != cbs_tail)
					poll(NULL, 0, 1);
				if (cbs == &crdp->cbs.dummy) {
					cbs = cbs->next;
					continue;
				}
				rhp = (struct rcu_head *)cbs;
				cbs = cbs->next;
				rhp->func(rhp);
				cbcount++;
			} while (cbs != NULL);
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (&crdp->cbs.head
			    == _CMM_LOAD_SHARED(crdp->cbs.tail)) {
				call_rcu_wait(crdp);
				poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				poll(NULL, 0, 10);
			}
		} else {
			poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}
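
/*
 * Note on the batch extraction above: exchanging cbs.tail back to
 * &cbs.head atomically claims every callback queued so far, so a
 * single synchronize_rcu() covers the whole batch. A minimal sketch
 * of the same splice idiom on a hypothetical queue "q" (illustration
 * only, not compiled in; the head spin allows for an enqueuer caught
 * mid-append):
 *
 *	struct cds_wfq_node *head;
 *	struct cds_wfq_node **tail;
 *
 *	while ((head = _CMM_LOAD_SHARED(q.head)) == NULL)
 *		poll(NULL, 0, 1);
 *	_CMM_STORE_SHARED(q.head, NULL);
 *	tail = (struct cds_wfq_node **)
 *		uatomic_xchg(&q.tail, &q.head);
 *	// walk from head until &node->next == tail, skipping q.dummy
 */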

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL) {
		fprintf(stderr, "Out of memory.\n");
		exit(-1);
	}
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfq_init(&crdp->cbs);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
		perror("pthread_create");
		exit(-1);
	}
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * sched_getcpu().
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;

	if (per_cpu_call_rcu_data == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return per_cpu_call_rcu_data[cpu];
}

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}
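
/*
 * Example use (illustration only, not compiled in): create an extra
 * call_rcu worker with default flags and no CPU affinity, then fetch
 * the tid of the thread servicing it:
 *
 *	struct call_rcu_data *crdp = create_call_rcu_data(0, -1);
 *	pthread_t tid = get_call_rcu_thread(crdp);
 */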

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course). Attempting to install a new
 * structure while the CPU's old one is still in place fails with
 * -EEXIST.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	per_cpu_call_rcu_data[cpu] = crdp;
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
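
/*
 * Example of replacing a CPU's call_rcu_data under the -EEXIST rule
 * above (illustration only, not compiled in): the old structure must
 * be removed before the new one can be installed.
 *
 *	int cpu = 0;
 *	struct call_rcu_data *oldcrdp = get_cpu_call_rcu_data(cpu);
 *	struct call_rcu_data *newcrdp = create_call_rcu_data(0, cpu);
 *
 *	if (set_cpu_call_rcu_data(cpu, NULL) != 0)
 *		perror("remove old call_rcu_data");
 *	if (set_cpu_call_rcu_data(cpu, newcrdp) != 0)
 *		perror("install new call_rcu_data");	// -EEXIST if oldcrdp
 *							// were still installed
 *	call_rcu_data_free(oldcrdp);			// terminate old worker
 */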

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (thread_call_rcu_data != NULL)
		return thread_call_rcu_data;

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return thread_call_rcu_data;
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	thread_call_rcu_data = crdp;
}
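
/*
 * Example (illustration only, not compiled in): give the current
 * thread its own real-time call_rcu worker, then later revert to the
 * shared per-CPU/default lookup:
 *
 *	struct call_rcu_data *crdp;
 *
 *	crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(crdp);
 *	// ... application work ...
 *	set_thread_call_rcu_data(NULL);
 *	call_rcu_data_free(crdp);
 */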

/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			/*
			 * Can happen on race with a concurrent
			 * set_cpu_call_rcu_data(); dispose of the
			 * losing crdp rather than leaking it.
			 */
			call_rcu_data_free(crdp);
			return ret;
		}
	}
	return 0;
}
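
/*
 * Example (illustration only, not compiled in): spin up one call_rcu
 * worker per CPU at program start and tear them all down before exit:
 *
 *	if (create_all_cpu_call_rcu_data(0))
 *		perror("create_all_cpu_call_rcu_data");
 *	// ... application work ...
 *	free_all_cpu_call_rcu_data();
 */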

/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 */

void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	cds_wfq_node_init(&head->next);
	head->func = func;
	crdp = get_call_rcu_data();
	cds_wfq_enqueue(&crdp->cbs, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}
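
/*
 * Typical call_rcu() use (illustrative sketch with a hypothetical
 * "struct foo"; not compiled in): embed a struct rcu_head in the
 * structure to be reclaimed and recover the enclosing object in the
 * callback:
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	static void free_foo(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu);
 *
 *		free(p);
 *	}
 *
 *	// once *p is unreachable by new readers:
 *	call_rcu(&p->rcu, free_foo);
 */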

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct cds_wfq_node **cbs_endprev;

	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			poll(NULL, 0, 1);
	}
	if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
		while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
			poll(NULL, 0, 1);
		_CMM_STORE_SHARED(crdp->cbs.head, NULL);
		cbs_tail = (struct cds_wfq_node **)
			uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
		/* Create default call rcu data if need be. */
		(void) get_default_call_rcu_data();
		/*
		 * Migrate the leftover callbacks by splicing them onto
		 * the tail of the default queue, not by exchanging the
		 * default_call_rcu_data pointer itself.
		 */
		cbs_endprev = (struct cds_wfq_node **)
			uatomic_xchg(&default_call_rcu_data->cbs.tail,
				     cbs_tail);
		*cbs_endprev = cbs;
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	cds_list_del(&crdp->list);
	free(crdp);
}

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data *crdp;

	if (maxcpus <= 0)
		return;
	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp = get_cpu_call_rcu_data(cpu);
		if (crdp == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
		call_rcu_data_free(crdp);
	}
}

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	call_rcu_lock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Dispose of all of the rest of the call_rcu_data structures. */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
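
/*
 * Example of wiring the fork handlers up (illustration only, not
 * compiled in), so that a child of fork() inherits a consistent
 * call_rcu state:
 *
 *	pthread_atfork(call_rcu_before_fork,
 *		       call_rcu_after_fork_parent,
 *		       call_rcu_after_fork_child);
 */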