projects
/
urcu.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
rculfhash: output approximation of number of nodes in counting
[urcu.git]
/
urcu-call-rcu-impl.h
diff --git
a/urcu-call-rcu-impl.h
b/urcu-call-rcu-impl.h
index d5baa69685387808044529f634a69157ca9126c0..700d1289192a31a5f2693cf79e4efd99be993220 100644
(file)
--- a/
urcu-call-rcu-impl.h
+++ b/
urcu-call-rcu-impl.h
@@
-115,7
+115,12
@@
static void alloc_cpu_call_rcu_data(void)
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
-static const struct call_rcu_data **per_cpu_call_rcu_data = NULL;
+/*
+ * per_cpu_call_rcu_data should be constant, but some functions below, used both
+ * for cases where cpu number is available and not available, assume it is not
+ * constant.
+ */
+static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;
static void alloc_cpu_call_rcu_data(void)
static const long maxcpus = -1;
static void alloc_cpu_call_rcu_data(void)
@@
-210,6
+215,11
@@
static void *call_rcu_thread(void *arg)
exit(-1);
}
exit(-1);
}
+ /*
+ * If callbacks take a read-side lock, we need to be registered.
+ */
+ rcu_register_thread();
+
thread_call_rcu_data = crdp;
if (!rt) {
uatomic_dec(&crdp->futex);
thread_call_rcu_data = crdp;
if (!rt) {
uatomic_dec(&crdp->futex);
@@
-242,6
+252,7
@@
static void *call_rcu_thread(void *arg)
}
if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
break;
}
if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
break;
+ rcu_thread_offline();
if (!rt) {
if (&crdp->cbs.head
== _CMM_LOAD_SHARED(crdp->cbs.tail)) {
if (!rt) {
if (&crdp->cbs.head
== _CMM_LOAD_SHARED(crdp->cbs.tail)) {
@@
-259,6
+270,7
@@
static void *call_rcu_thread(void *arg)
} else {
poll(NULL, 0, 10);
}
} else {
poll(NULL, 0, 10);
}
+ rcu_thread_online();
}
if (!rt) {
/*
}
if (!rt) {
/*
@@
-268,6
+280,7
@@
static void *call_rcu_thread(void *arg)
uatomic_set(&crdp->futex, 0);
}
uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
uatomic_set(&crdp->futex, 0);
}
uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
+ rcu_unregister_thread();
return NULL;
}
return NULL;
}
@@
-370,7
+383,7
@@
struct call_rcu_data *create_call_rcu_data(unsigned long flags,
int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
- int warned = 0;
+
static
int warned = 0;
call_rcu_lock(&call_rcu_mutex);
if (cpu < 0 || maxcpus <= cpu) {
call_rcu_lock(&call_rcu_mutex);
if (cpu < 0 || maxcpus <= cpu) {
@@
-469,7
+482,9
@@
void set_thread_call_rcu_data(struct call_rcu_data *crdp)
/*
* Create a separate call_rcu thread for each CPU. This does not
* replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
/*
* Create a separate call_rcu thread for each CPU. This does not
* replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
- * function if you want that behavior.
+ * function if you want that behavior. Should be paired with
+ * free_all_cpu_call_rcu_data() to teardown these call_rcu worker
+ * threads.
*/
int create_all_cpu_call_rcu_data(unsigned long flags)
*/
int create_all_cpu_call_rcu_data(unsigned long flags)
This page took
0.025194 seconds
and
4
git commands to generate.