X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=urcu-call-rcu-impl.h;h=6495f465e1fade0477179be441694751cbc8c473;hb=2819ef641bb6e2f7aa60c28f3a8f5ad252aa7d00;hp=0cc2ba239d4f2c4a6f0b898c76762027c522cbb1;hpb=750cfec5f1a025abf67c615f3bdb00c7d9625df5;p=userspace-rcu.git

diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index 0cc2ba2..6495f46 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -43,6 +43,9 @@
 #include "urcu/tls-compat.h"
 #include "urcu-die.h"
 
+#define SET_AFFINITY_CHECK_PERIOD		(1U << 8)	/* 256 */
+#define SET_AFFINITY_CHECK_PERIOD_MASK		(SET_AFFINITY_CHECK_PERIOD - 1)
+
 /* Data structure that identifies a call_rcu thread. */
 
 struct call_rcu_data {
@@ -52,6 +55,7 @@ struct call_rcu_data {
 	unsigned long qlen; /* maintained for debugging. */
 	pthread_t tid;
 	int cpu_affinity;
+	unsigned long gp_count;
 	struct cds_list_head list;
 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 
@@ -81,7 +85,23 @@ static struct call_rcu_data *default_call_rcu_data;
  * CPUs rather than only to specific threads.
  */
 
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+#ifdef HAVE_SCHED_GETCPU
+
+static int urcu_sched_getcpu(void)
+{
+	return sched_getcpu();
+}
+
+#else /* #ifdef HAVE_SCHED_GETCPU */
+
+static int urcu_sched_getcpu(void)
+{
+	return -1;
+}
+
+#endif /* #else #ifdef HAVE_SCHED_GETCPU */
+
+#if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU)
 
 /*
  * Pointer to array of pointers to per-CPU call_rcu_data structures
@@ -124,7 +144,7 @@ static void alloc_cpu_call_rcu_data(void)
 	}
 }
 
-#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
 
 /*
  * per_cpu_call_rcu_data should be constant, but some functions below, used both
@@ -142,12 +162,7 @@ static void alloc_cpu_call_rcu_data(void)
 {
 }
 
-static int sched_getcpu(void)
-{
-	return -1;
-}
-
-#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
 
 /* Acquire the specified pthread mutex. */
 
@@ -171,22 +186,42 @@ static void call_rcu_unlock(pthread_mutex_t *pmp)
 		urcu_die(ret);
 }
 
+/*
+ * Periodically retry setting CPU affinity if we migrate.
+ * Losing affinity can be caused by CPU hotunplug/hotplug, or by
+ * cpuset(7).
+ */
 #if HAVE_SCHED_SETAFFINITY
 static
 int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 {
 	cpu_set_t mask;
+	int ret;
 
 	if (crdp->cpu_affinity < 0)
 		return 0;
+	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
+		return 0;
+	if (urcu_sched_getcpu() == crdp->cpu_affinity)
+		return 0;
 
 	CPU_ZERO(&mask);
 	CPU_SET(crdp->cpu_affinity, &mask);
 #if SCHED_SETAFFINITY_ARGS == 2
-	return sched_setaffinity(0, &mask);
+	ret = sched_setaffinity(0, &mask);
 #else
-	return sched_setaffinity(0, sizeof(mask), &mask);
+	ret = sched_setaffinity(0, sizeof(mask), &mask);
 #endif
+	/*
+	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
+	 * cpuset(7). This is why we should always retry if we detect
+	 * migration.
+	 */
+	if (ret && errno == EINVAL) {
+		ret = 0;
+		errno = 0;
+	}
+	return ret;
 }
 #else
 static
@@ -200,9 +235,22 @@ static void call_rcu_wait(struct call_rcu_data *crdp)
 {
 	/* Read call_rcu list before read futex */
 	cmm_smp_mb();
-	if (uatomic_read(&crdp->futex) == -1)
-		futex_async(&crdp->futex, FUTEX_WAIT, -1,
-			NULL, NULL, 0);
+	if (uatomic_read(&crdp->futex) != -1)
+		return;
+	while (futex_async(&crdp->futex, FUTEX_WAIT, -1,
+			NULL, NULL, 0)) {
+		switch (errno) {
+		case EWOULDBLOCK:
+			/* Value already changed. */
+			return;
+		case EINTR:
+			/* Retry if interrupted by signal. */
+			break;	/* Get out of switch. */
+		default:
+			/* Unexpected error. */
+			urcu_die(errno);
+		}
+	}
 }
 
 static void call_rcu_wake_up(struct call_rcu_data *crdp)
@@ -211,8 +259,9 @@ static void call_rcu_wake_up(struct call_rcu_data *crdp)
 	cmm_smp_mb();
 	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
 		uatomic_set(&crdp->futex, 0);
-		futex_async(&crdp->futex, FUTEX_WAKE, 1,
-			NULL, NULL, 0);
+		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
+				NULL, NULL, 0) < 0)
+			urcu_die(errno);
 	}
 }
 
@@ -226,10 +275,8 @@ static void *call_rcu_thread(void *arg)
 	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
 	struct rcu_head *rhp;
 	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
-	int ret;
 
-	ret = set_thread_cpu_affinity(crdp);
-	if (ret)
+	if (set_thread_cpu_affinity(crdp))
 		urcu_die(errno);
 
 	/*
@@ -244,6 +291,9 @@ static void *call_rcu_thread(void *arg)
 		cmm_smp_mb();
 	}
 	for (;;) {
+		if (set_thread_cpu_affinity(crdp))
+			urcu_die(errno);
+
 		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
 			/*
 			 * Pause requested. Become quiescent: remove
@@ -256,6 +306,8 @@ static void *call_rcu_thread(void *arg)
 			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
 			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
 				poll(NULL, 0, 1);
+			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
+			cmm_smp_mb__after_uatomic_and();
 			rcu_register_thread();
 		}
 
@@ -339,6 +391,7 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 	crdp->flags = flags;
 	cds_list_add(&crdp->list, &call_rcu_data_list);
 	crdp->cpu_affinity = cpu_affinity;
+	crdp->gp_count = 0;
 	cmm_smp_mb(); /* Structure initialized before pointer is planted. */
 	*crdpp = crdp;
 	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
@@ -350,7 +403,7 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
  * Return a pointer to the call_rcu_data structure for the specified
  * CPU, returning NULL if there is none. We cannot automatically
  * create it because the platform we are running on might not define
- * sched_getcpu().
+ * urcu_sched_getcpu().
  *
  * The call to this function and use of the returned call_rcu_data
  * should be protected by RCU read-side lock.
@@ -492,7 +545,7 @@ struct call_rcu_data *get_call_rcu_data(void)
 		return URCU_TLS(thread_call_rcu_data);
 
 	if (maxcpus > 0) {
-		crd = get_cpu_call_rcu_data(sched_getcpu());
+		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
 		if (crd)
 			return crd;
 	}
@@ -609,12 +662,12 @@ void call_rcu(struct rcu_head *head,
 	cds_wfq_node_init(&head->next);
 	head->func = func;
 	/* Holding rcu read-side lock across use of per-cpu crdp */
-	rcu_read_lock();
+	_rcu_read_lock();
 	crdp = get_call_rcu_data();
 	cds_wfq_enqueue(&crdp->cbs, &head->next);
 	uatomic_inc(&crdp->qlen);
 	wake_call_rcu_thread(crdp);
-	rcu_read_unlock();
+	_rcu_read_unlock();
 }
 
 /*
@@ -752,6 +805,10 @@ void call_rcu_after_fork_parent(void)
 
 	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
 		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
+			poll(NULL, 0, 1);
+	}
 	call_rcu_unlock(&call_rcu_mutex);
 }
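
The heart of the change is the cheap periodic affinity re-check in set_thread_cpu_affinity(): the per-thread gp_count counter is incremented on every call, and only every SET_AFFINITY_CHECK_PERIOD-th call (256, a power of two, so the test is a simple mask) pays for sched_getcpu() and a possible sched_setaffinity(); EINVAL is tolerated because the target CPU may have been hot-unplugged or excluded by cpuset(7). The following is a minimal standalone sketch of that pattern only; the struct and function names (worker, maybe_reset_affinity) are invented for the example and are not part of the patch:

/* Illustrative sketch, not part of the patch. */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>

#define CHECK_PERIOD		(1U << 8)	/* re-check every 256 calls */
#define CHECK_PERIOD_MASK	(CHECK_PERIOD - 1)

struct worker {
	int cpu_affinity;	/* -1 means no affinity requested */
	unsigned long count;	/* calls since thread start */
};

static int maybe_reset_affinity(struct worker *w)
{
	cpu_set_t mask;
	int ret;

	if (w->cpu_affinity < 0)
		return 0;		/* nothing to enforce */
	if (++w->count & CHECK_PERIOD_MASK)
		return 0;		/* amortize: check one call in 256 */
	if (sched_getcpu() == w->cpu_affinity)
		return 0;		/* still running on the requested CPU */

	CPU_ZERO(&mask);
	CPU_SET(w->cpu_affinity, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret && errno == EINVAL) {
		/* Target CPU offline or masked by cpuset(7): retry later. */
		ret = 0;
		errno = 0;
	}
	return ret;			/* non-zero only on unexpected errors */
}

In the patch itself, call_rcu_thread() now calls set_thread_cpu_affinity() at the top of every loop iteration, so the mask test keeps the common-case cost to a single increment and branch.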
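
The reworked call_rcu_wait() also stops ignoring futex errors: FUTEX_WAIT is retried in a loop, EWOULDBLOCK (the futex word changed before the thread went to sleep) and EINTR (interrupted by a signal) are treated as benign, and anything else is fatal. Below is a sketch of the same loop written against the raw futex(2) system call rather than liburcu's internal futex_async() helper; the function name wait_on_futex is invented for the example:

/* Illustrative sketch, not part of the patch. */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/futex.h>
#include <sys/syscall.h>

/* Block until *futex_word is no longer -1. */
static void wait_on_futex(int32_t *futex_word)
{
	while (syscall(SYS_futex, futex_word, FUTEX_WAIT, -1,
			NULL, NULL, 0) == -1) {
		switch (errno) {
		case EWOULDBLOCK:
			return;		/* value already changed: no need to sleep */
		case EINTR:
			break;		/* interrupted by a signal: retry the wait */
		default:
			abort();	/* unexpected futex error */
		}
	}
}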