Make call_rcu_thread() affine itself more persistently
Currently, URCU simply fails if a call_rcu_thread() fails to affine
itself. This is problematic when execution is constrained by cgroups
or by hotunplugged CPUs. This commit therefore makes call_rcu_thread()
retry setting its affinity every 256 grace periods, but only if it
detects that it has migrated to a different CPU. Since sched_getcpu()
is cheap on many architectures, this check is far less costly than
unconditionally issuing the sched_setaffinity() system call.
Reported-by: Michael Jeanson <mjeanson@efficios.com>
Suggested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
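
For illustration, the retry logic added below can be condensed into a
standalone sketch (a minimal sketch only: the helper check_affinity()
is hypothetical, standing in for liburcu's internal
set_thread_cpu_affinity(), and gp_count is passed explicitly rather
than living in struct call_rcu_data):

#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>

#define CHECK_PERIOD		(1U << 8)	/* check every 256 calls */
#define CHECK_PERIOD_MASK	(CHECK_PERIOD - 1)

/* Hypothetical sketch of the commit's strategy; not liburcu code. */
static int check_affinity(int target_cpu, unsigned long *gp_count)
{
	cpu_set_t mask;
	int ret;

	if (target_cpu < 0)
		return 0;	/* No affinity requested. */
	/* Power-of-two period: nonzero low 8 bits => skip this pass. */
	if (++*gp_count & CHECK_PERIOD_MASK)
		return 0;
	/* sched_getcpu() is vDSO-accelerated on many architectures. */
	if (sched_getcpu() == target_cpu)
		return 0;	/* Still on the right CPU: nothing to do. */
	CPU_ZERO(&mask);
	CPU_SET(target_cpu, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	/*
	 * EINVAL can mean the CPU is hotunplugged or outside the
	 * cpuset; tolerate it and let a later pass retry.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}

Of every 256 invocations, 255 return after a single increment-and-mask,
and even the 256th returns after a cheap sched_getcpu() read unless the
thread has actually migrated.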
#include "urcu/tls-compat.h"
#include "urcu-die.h"
#include "urcu/tls-compat.h"
#include "urcu-die.h"
+#define SET_AFFINITY_CHECK_PERIOD (1U << 8) /* 256 */
+#define SET_AFFINITY_CHECK_PERIOD_MASK (SET_AFFINITY_CHECK_PERIOD - 1)
+
 /* Data structure that identifies a call_rcu thread. */
 struct call_rcu_data {
 	unsigned long qlen; /* maintained for debugging. */
 	pthread_t tid;
 	int cpu_affinity;
+	unsigned long gp_count;	/* Grace periods handled; paces affinity rechecks. */
 	struct cds_list_head list;
 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+/*
+ * Periodically retry setting CPU affinity if we migrate.
+ * Losing affinity can be caused by CPU hotunplug/hotplug, or by
+ * cpuset(7).
+ */
 #if HAVE_SCHED_SETAFFINITY
 static
 int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 {
 	cpu_set_t mask;
+	int ret;
 	if (crdp->cpu_affinity < 0)
 		return 0;
+	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
+		return 0;
+	if (urcu_sched_getcpu() == crdp->cpu_affinity)
+		return 0;
 	CPU_ZERO(&mask);
 	CPU_SET(crdp->cpu_affinity, &mask);
 #if SCHED_SETAFFINITY_ARGS == 2
-	return sched_setaffinity(0, &mask);
+	ret = sched_setaffinity(0, &mask);
 #else
-	return sched_setaffinity(0, sizeof(mask), &mask);
+	ret = sched_setaffinity(0, sizeof(mask), &mask);
 #endif
+	/*
+	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
+	 * cpuset(7). This is why we should always retry if we detect
+	 * migration.
+	 */
+	if (ret && errno == EINVAL) {
+		ret = 0;
+		errno = 0;
+	}
+	return ret;
 }
 	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
 	struct rcu_head *rhp;
 	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
-	ret = set_thread_cpu_affinity(crdp);
-	if (ret)
+	if (set_thread_cpu_affinity(crdp))
 		urcu_die(errno);
 		cmm_smp_mb();
 	}
 	for (;;) {
+		if (set_thread_cpu_affinity(crdp))
+			urcu_die(errno);
+
 		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
 			/*
 			 * Pause requested. Become quiescent: remove
 	crdp->flags = flags;
 	cds_list_add(&crdp->list, &call_rcu_data_list);
 	crdp->cpu_affinity = cpu_affinity;
+	crdp->gp_count = 0;
 	cmm_smp_mb(); /* Structure initialized before pointer is planted. */
 	*crdpp = crdp;
 	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
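
For context, here is a hedged usage sketch of the path this patch
hardens, assuming liburcu's public create_call_rcu_data() and
set_thread_call_rcu_data() API (the include and the choice of CPU 0
are illustrative): the worker created below is the call_rcu thread
whose affinity set_thread_cpu_affinity() now re-asserts instead of
dying when its CPU disappears from the allowed set.

#include <urcu.h>	/* assumes the default urcu flavor; link with -lurcu */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct call_rcu_data *crdp;

	rcu_register_thread();

	/* Spawn a call_rcu worker pinned to CPU 0 (flags = 0). */
	crdp = create_call_rcu_data(0, 0);
	if (!crdp) {
		fprintf(stderr, "create_call_rcu_data() failed\n");
		exit(EXIT_FAILURE);
	}
	/* Route this thread's call_rcu() invocations to that worker. */
	set_thread_call_rcu_data(crdp);

	/* ... enqueue callbacks with call_rcu() here ... */

	rcu_unregister_thread();
	return 0;
}

With this patch applied, hot-unplugging CPU 0 or shrinking the cpuset
no longer kills the worker: the EINVAL is tolerated, and the affinity
is retried once the thread detects it is running on another CPU.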