X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=src%2Furcu-call-rcu-impl.h;h=9f2ed7dd709263f3eaa8418d7562a7ec91c993b6;hb=HEAD;hp=cb010687829959f1750242dadb6ecfa99b9cd912;hpb=1cf55ba47342156cdf25335264b9774a16e0bb2d;p=urcu.git

diff --git a/src/urcu-call-rcu-impl.h b/src/urcu-call-rcu-impl.h
index cb01068..2ea1efc 100644
--- a/src/urcu-call-rcu-impl.h
+++ b/src/urcu-call-rcu-impl.h
@@ -1,23 +1,9 @@
+// SPDX-FileCopyrightText: 2010 Paul E. McKenney
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
 /*
- * urcu-call-rcu.c
- *
  * Userspace RCU library - batch memory reclamation with kernel API
- *
- * Copyright (c) 2010 Paul E. McKenney
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 #define _LGPL_SOURCE
@@ -450,8 +436,7 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 	cds_list_add(&crdp->list, &call_rcu_data_list);
 	crdp->cpu_affinity = cpu_affinity;
 	crdp->gp_count = 0;
-	cmm_smp_mb(); /* Structure initialized before pointer is planted. */
-	*crdpp = crdp;
+	rcu_set_pointer(crdpp, crdp);
 
 	ret = sigfillset(&newmask);
 	urcu_posix_assert(!ret);
@@ -575,22 +560,27 @@ int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
 
 /*
  * Return a pointer to the default call_rcu_data structure, creating
- * one if need be. Because we never free call_rcu_data structures,
- * we don't need to be in an RCU read-side critical section.
+ * one if need be.
+ *
+ * A call to this function made with the intent of using the returned
+ * call_rcu_data should be protected by an RCU read-side lock.
  */
 struct call_rcu_data *get_default_call_rcu_data(void)
 {
-	if (default_call_rcu_data != NULL)
-		return rcu_dereference(default_call_rcu_data);
+	struct call_rcu_data *crdp;
+
+	crdp = rcu_dereference(default_call_rcu_data);
+	if (crdp != NULL)
+		return crdp;
+
 	call_rcu_lock(&call_rcu_mutex);
-	if (default_call_rcu_data != NULL) {
-		call_rcu_unlock(&call_rcu_mutex);
-		return default_call_rcu_data;
-	}
-	call_rcu_data_init(&default_call_rcu_data, 0, -1);
+	if (default_call_rcu_data == NULL)
+		call_rcu_data_init(&default_call_rcu_data, 0, -1);
+	crdp = default_call_rcu_data;
 	call_rcu_unlock(&call_rcu_mutex);
-	return default_call_rcu_data;
+
+	return crdp;
 }
 
 /*
@@ -905,7 +895,7 @@ void rcu_barrier(void)
 		goto online;
 	}
 
-	completion = calloc(sizeof(*completion), 1);
+	completion = calloc(1, sizeof(*completion));
 	if (!completion)
 		urcu_die(errno);
 
@@ -920,7 +910,7 @@ void rcu_barrier(void)
 	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
 		struct call_rcu_completion_work *work;
 
-		work = calloc(sizeof(*work), 1);
+		work = calloc(1, sizeof(*work));
 		if (!work)
 			urcu_die(errno);
 		work->completion = completion;
@@ -1060,8 +1050,63 @@ void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
  * This unregistration function is deprecated, meant only for internal
  * use by rculfhash.
  */
-__attribute__((noreturn))
+__attribute__((__noreturn__))
 void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
 {
 	urcu_die(EPERM);
 }
+
+/*
+ * Tear down the default call_rcu worker thread on process exit if
+ * there are no queued callbacks. This prevents leaking memory.
+ *
+ * Here is how an application can ensure graceful teardown of this
+ * worker thread:
+ *
+ * - An application queuing call_rcu callbacks should invoke
+ *   rcu_barrier() before it exits.
+ * - When chaining call_rcu callbacks, the number of calls to
+ *   rcu_barrier() on application exit must match at least the maximum
+ *   number of chained callbacks.
+ * - If an application chains callbacks endlessly, it would have to be
+ *   modified to stop chaining callbacks when it detects an application
+ *   exit (e.g. with a flag), and wait for quiescence with rcu_barrier()
+ *   after setting that flag.
+ * - The statements above also apply to a library which queues
+ *   call_rcu callbacks, except that it must invoke rcu_barrier() in
+ *   its library destructor.
+ *
+ * Note that even though this function is invoked from a destructor,
+ * it does not assume the application is single-threaded: it
+ * synchronizes against concurrent calls to
+ * get_default_call_rcu_data().
+ */
+static void urcu_call_rcu_exit(void)
+{
+	struct call_rcu_data *crdp;
+	bool teardown = true;
+
+	if (default_call_rcu_data == NULL)
+		return;
+	call_rcu_lock(&call_rcu_mutex);
+	/*
+	 * If the application leaves callbacks in the default call_rcu
+	 * worker queue, keep the default worker in place.
+	 */
+	crdp = default_call_rcu_data;
+	if (!crdp) {
+		teardown = false;
+		goto unlock;
+	}
+	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
+		teardown = false;
+		goto unlock;
+	}
+	rcu_set_pointer(&default_call_rcu_data, NULL);
+unlock:
+	call_rcu_unlock(&call_rcu_mutex);
+	if (teardown) {
+		synchronize_rcu();
+		call_rcu_data_free(crdp);
+	}
+}
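
Usage note (not part of the patch): with the get_default_call_rcu_data() change above, a caller that intends to use the returned call_rcu_data should hold an RCU read-side lock across the lookup and the use, since the default worker may now be torn down on process exit. The helper below is a minimal sketch only; it assumes the unprefixed liburcu API from <urcu.h> (which is assumed to pull in the call_rcu declarations), a calling thread already registered with rcu_register_thread(), and uses get_call_rcu_thread() merely as an example consumer of the returned pointer.

#include <pthread.h>
#include <urcu.h>	/* assumed to provide get_default_call_rcu_data(), rcu_read_lock(), ... */

/*
 * Fetch the default call_rcu worker under an RCU read-side lock and
 * return its pthread_t, e.g. so the caller can adjust the worker's
 * scheduling.  The read-side lock keeps the default worker from being
 * reclaimed between the lookup and the use of the returned pointer.
 */
static pthread_t default_call_rcu_worker(void)
{
	struct call_rcu_data *crdp;
	pthread_t tid;

	rcu_read_lock();
	crdp = get_default_call_rcu_data();
	tid = get_call_rcu_thread(crdp);
	rcu_read_unlock();

	return tid;
}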
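
Usage note (not part of the patch): the urcu_call_rcu_exit() teardown above only frees the default worker when its callback queue is empty, so an application that queues call_rcu callbacks should drain them with rcu_barrier() before exiting, as the comment block describes. The program below is a minimal sketch of that pattern, assuming the unprefixed liburcu API from <urcu.h>; the mynode structure and free_mynode() callback are illustrative names only.

#include <stdlib.h>
#include <urcu.h>		/* assumed to provide call_rcu(), rcu_barrier(), ... */
#include <urcu/compiler.h>	/* caa_container_of() */

struct mynode {
	int value;
	struct rcu_head rcu_head;
};

static void free_mynode(struct rcu_head *head)
{
	struct mynode *node = caa_container_of(head, struct mynode, rcu_head);

	free(node);
}

int main(void)
{
	struct mynode *node;

	rcu_register_thread();

	node = malloc(sizeof(*node));
	if (!node)
		abort();
	node->value = 42;

	/* Queue deferred reclamation through the default call_rcu worker. */
	call_rcu(&node->rcu_head, free_mynode);

	/*
	 * Drain queued callbacks before exit so the exit-time teardown
	 * above finds an empty queue and can free the default worker.
	 */
	rcu_barrier();

	rcu_unregister_thread();
	return 0;
}

If a callback chains further call_rcu() invocations, one rcu_barrier() call per level of chaining is needed before exit, as noted in the comment block above.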