#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
-static const struct call_rcu_data **per_cpu_call_rcu_data = NULL;
+/*
+ * per_cpu_call_rcu_data should be constant, but some functions below, used both
+ * for cases where cpu number is available and not available, assume it is not
+ * constant.
+ */
+static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;
static void alloc_cpu_call_rcu_data(void)
exit(-1);
}
+ /*
+ * If callbacks take a read-side lock, we need to be registered.
+ */
+ rcu_register_thread();
+
thread_call_rcu_data = crdp;
if (!rt) {
uatomic_dec(&crdp->futex);
}
if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
break;
+ rcu_thread_offline();
if (!rt) {
if (&crdp->cbs.head
== _CMM_LOAD_SHARED(crdp->cbs.tail)) {
} else {
poll(NULL, 0, 10);
}
+ rcu_thread_online();
}
if (!rt) {
/*
uatomic_set(&crdp->futex, 0);
}
uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
+ rcu_unregister_thread();
return NULL;
}
/*
* Create a separate call_rcu thread for each CPU. This does not
* replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
- * function if you want that behavior.
+ * function if you want that behavior. Should be paired with
+ * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
+ * threads.
*/
int create_all_cpu_call_rcu_data(unsigned long flags)