/* Lazily allocated per-CPU array of call_rcu helper data (one slot per CPU). */
static struct call_rcu_data **per_cpu_call_rcu_data;
/* Cached CPU count used to size the array above; presumably probed on first
 * allocation and reset to 0 after fork — TODO confirm against the allocator. */
static long maxcpus;
+static void maxcpus_reset(void)
+{
+ maxcpus = 0;
+}
+
/* Allocate the array if it has not already been allocated. */
static void alloc_cpu_call_rcu_data(void)
/* Build without per-CPU support: keep the array pointer permanently NULL. */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
/* -1 sentinel, const: per-CPU code paths are compiled out in this variant. */
static const long maxcpus = -1;
/*
 * No-op stub: in this configuration maxcpus is a compile-time
 * constant (-1), so there is nothing to reset after fork.
 */
static void maxcpus_reset(void)
{
}
+
/*
 * Stub for builds without per-CPU call_rcu data: the per-CPU array is
 * never allocated, so this is intentionally a no-op.
 */
static void alloc_cpu_call_rcu_data(void)
{
}
_CMM_STORE_SHARED(crdp->cbs.head, NULL);
cbs_tail = (struct cds_wfq_node **)
uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
+ /* Create default call rcu data if need be */
+ (void) get_default_call_rcu_data();
cbs_endprev = (struct cds_wfq_node **)
uatomic_xchg(&default_call_rcu_data, cbs_tail);
*cbs_endprev = cbs;
wake_call_rcu_thread(default_call_rcu_data);
}
+ call_rcu_lock(&call_rcu_mutex);
cds_list_del(&crdp->list);
+ call_rcu_unlock(&call_rcu_mutex);
+
free(crdp);
}
/* Release the mutex. */
call_rcu_unlock(&call_rcu_mutex);
+ /* Do nothing when call_rcu() has not been used */
+ if (cds_list_empty(&call_rcu_data_list))
+ return;
+
/*
* Allocate a new default call_rcu_data structure in order
* to get a working call_rcu thread to go with it.
default_call_rcu_data = NULL;
(void)get_default_call_rcu_data();
+ /* Cleanup call_rcu_data pointers before use */
+ maxcpus_reset();
+ free(per_cpu_call_rcu_data);
+ per_cpu_call_rcu_data = NULL;
+ thread_call_rcu_data = NULL;
+
/* Dispose of all of the rest of the call_rcu_data structures. */
cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
if (crdp == default_call_rcu_data)