projects
/
userspace-rcu.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Fix: call_rcu_thread() affinity failure
[userspace-rcu.git]
/
urcu-call-rcu-impl.h
diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index 51ee91ffd1ef87e0e8a8e76ff00b64d6dd24c660..e70789a5906d6dec6f707025ec3db5895ea34b0f 100644
(file)
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -43,6 +43,9 @@
#include "urcu/tls-compat.h"
#include "urcu-die.h"
#include "urcu/tls-compat.h"
#include "urcu-die.h"
+#define SET_AFFINITY_CHECK_PERIOD (1U << 8) /* 256 */
+#define SET_AFFINITY_CHECK_PERIOD_MASK (SET_AFFINITY_CHECK_PERIOD - 1)
+
/* Data structure that identifies a call_rcu thread. */
struct call_rcu_data {
/* Data structure that identifies a call_rcu thread. */
struct call_rcu_data {
@@ -52,6 +55,7 @@ struct call_rcu_data {
unsigned long qlen; /* maintained for debugging. */
pthread_t tid;
int cpu_affinity;
unsigned long qlen; /* maintained for debugging. */
pthread_t tid;
int cpu_affinity;
+ unsigned long gp_count;
struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
@@ -182,22 +186,42 @@ static void call_rcu_unlock(pthread_mutex_t *pmp)
urcu_die(ret);
}
urcu_die(ret);
}
+/*
+ * Periodically retry setting CPU affinity if we migrate.
+ * Losing affinity can be caused by CPU hotunplug/hotplug, or by
+ * cpuset(7).
+ */
#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
cpu_set_t mask;
#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
cpu_set_t mask;
+ int ret;
if (crdp->cpu_affinity < 0)
return 0;
if (crdp->cpu_affinity < 0)
return 0;
+ if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
+ return 0;
+ if (urcu_sched_getcpu() == crdp->cpu_affinity)
+ return 0;
CPU_ZERO(&mask);
CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
CPU_ZERO(&mask);
CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
-	return sched_setaffinity(0, &mask);
+	ret = sched_setaffinity(0, &mask);
#else
#else
-	return sched_setaffinity(0, sizeof(mask), &mask);
+	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif
+ /*
+ * EINVAL is fine: can be caused by hotunplugged CPUs, or by
+ * cpuset(7). This is why we should always retry if we detect
+ * migration.
+ */
+ if (ret && errno == EINVAL) {
+ ret = 0;
+ errno = 0;
+ }
+ return ret;
}
#else
static
}
#else
static
@@ -237,10 +261,8 @@ static void *call_rcu_thread(void *arg)
struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
struct rcu_head *rhp;
int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
struct rcu_head *rhp;
int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
- int ret;
- ret = set_thread_cpu_affinity(crdp);
- if (ret)
+ if (set_thread_cpu_affinity(crdp))
urcu_die(errno);
/*
urcu_die(errno);
/*
@@ -255,6 +277,9 @@ static void *call_rcu_thread(void *arg)
cmm_smp_mb();
}
for (;;) {
cmm_smp_mb();
}
for (;;) {
+ if (set_thread_cpu_affinity(crdp))
+ urcu_die(errno);
+
if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
/*
* Pause requested. Become quiescent: remove
if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
/*
* Pause requested. Become quiescent: remove
@@ -352,6 +377,7 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
crdp->flags = flags;
cds_list_add(&crdp->list, &call_rcu_data_list);
crdp->cpu_affinity = cpu_affinity;
crdp->flags = flags;
cds_list_add(&crdp->list, &call_rcu_data_list);
crdp->cpu_affinity = cpu_affinity;
+ crdp->gp_count = 0;
cmm_smp_mb(); /* Structure initialized before pointer is planted. */
*crdpp = crdp;
ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
cmm_smp_mb(); /* Structure initialized before pointer is planted. */
*crdpp = crdp;
ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
This page took 0.024072 seconds and 4 git commands to generate.