X-Git-Url: http://git.liburcu.org/?p=userspace-rcu.git;a=blobdiff_plain;f=urcu-call-rcu-impl.h;h=6495f465e1fade0477179be441694751cbc8c473;hp=f9250e8b4129b925aeb7b47f609d3e91dd3f790d;hb=refs%2Fheads%2Fstable-0.7;hpb=356c8794ee9e562954a52653fbb8f37b7fe16082

diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index f9250e8..6495f46 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -40,6 +40,11 @@
 #include "urcu-pointer.h"
 #include "urcu/list.h"
 #include "urcu/futex.h"
+#include "urcu/tls-compat.h"
+#include "urcu-die.h"
+
+#define SET_AFFINITY_CHECK_PERIOD	(1U << 8)	/* 256 */
+#define SET_AFFINITY_CHECK_PERIOD_MASK	(SET_AFFINITY_CHECK_PERIOD - 1)
 
 /* Data structure that identifies a call_rcu thread. */
 
@@ -50,6 +55,7 @@ struct call_rcu_data {
 	unsigned long qlen; /* maintained for debugging. */
 	pthread_t tid;
 	int cpu_affinity;
+	unsigned long gp_count;
 	struct cds_list_head list;
 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 
@@ -62,10 +68,11 @@ CDS_LIST_HEAD(call_rcu_data_list);
 
 /* Link a thread using call_rcu() to its call_rcu thread. */
 
-static __thread struct call_rcu_data *thread_call_rcu_data;
-
-/* Guard call_rcu thread creation. */
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
 
+/*
+ * Guard call_rcu thread creation and atfork handlers.
+ */
 static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 /* If a given thread does not have its own call_rcu thread, this is the default. */
@@ -78,16 +85,40 @@ static struct call_rcu_data *default_call_rcu_data;
  * CPUs rather than only to specific threads.
  */
 
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+#ifdef HAVE_SCHED_GETCPU
+
+static int urcu_sched_getcpu(void)
+{
+	return sched_getcpu();
+}
+
+#else /* #ifdef HAVE_SCHED_GETCPU */
+
+static int urcu_sched_getcpu(void)
+{
+	return -1;
+}
+
+#endif /* #else #ifdef HAVE_SCHED_GETCPU */
+
+#if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU)
 
 /*
  * Pointer to array of pointers to per-CPU call_rcu_data structures
- * and # CPUs.
+ * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
+ * array of RCU-protected pointers to call_rcu_data. call_rcu acts as an
+ * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
+ * without mutex. The call_rcu_mutex protects updates.
  */
 
 static struct call_rcu_data **per_cpu_call_rcu_data;
 static long maxcpus;
 
+static void maxcpus_reset(void)
+{
+	maxcpus = 0;
+}
+
 /* Allocate the array if it has not already been allocated. */
 
 static void alloc_cpu_call_rcu_data(void)
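The rewritten comment above pins down the protection scheme for per_cpu_call_rcu_data: both levels of the pointer array are read under the RCU read-side lock, and call_rcu_mutex serializes updates. A minimal sketch of that reader/updater split, using the file's own helpers (the two function names here are hypothetical and not part of the patch):

/*
 * Sketch only: reader side. Both the array pointer and the per-CPU
 * slot are loaded with rcu_dereference() inside a read-side critical
 * section, mirroring what get_cpu_call_rcu_data() does later in this
 * patch.
 */
static void reader_side_sketch(int cpu)
{
	struct call_rcu_data **arr, *crdp;

	rcu_read_lock();
	arr = rcu_dereference(per_cpu_call_rcu_data);
	if (arr != NULL) {
		crdp = rcu_dereference(arr[cpu]);
		/* ... use crdp while still inside the read-side section ... */
	}
	rcu_read_unlock();
}

/*
 * Sketch only: updater side. Publication is serialized by
 * call_rcu_mutex and goes through rcu_set_pointer(), as
 * set_cpu_call_rcu_data() does later in this patch.
 */
static void updater_side_sketch(int cpu, struct call_rcu_data *crdp)
{
	call_rcu_lock(&call_rcu_mutex);
	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
}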
@@ -104,7 +135,7 @@ static void alloc_cpu_call_rcu_data(void)
 	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
 	if (p != NULL) {
 		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
-		per_cpu_call_rcu_data = p;
+		rcu_set_pointer(&per_cpu_call_rcu_data, p);
 	} else {
 		if (!warned) {
 			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
@@ -113,7 +144,7 @@ static void alloc_cpu_call_rcu_data(void)
 	}
 }
 
-#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
 
 /*
  * per_cpu_call_rcu_data should be constant, but some functions below, used both
@@ -123,53 +154,74 @@ static void alloc_cpu_call_rcu_data(void)
 static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
 static const long maxcpus = -1;
 
-static void alloc_cpu_call_rcu_data(void)
+static void maxcpus_reset(void)
 {
 }
 
-static int sched_getcpu(void)
+static void alloc_cpu_call_rcu_data(void)
 {
-	return -1;
 }
 
-#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
 
 /* Acquire the specified pthread mutex. */
 
 static void call_rcu_lock(pthread_mutex_t *pmp)
 {
-	if (pthread_mutex_lock(pmp) != 0) {
-		perror("pthread_mutex_lock");
-		exit(-1);
-	}
+	int ret;
+
+	ret = pthread_mutex_lock(pmp);
+	if (ret)
+		urcu_die(ret);
 }
 
 /* Release the specified pthread mutex. */
 
 static void call_rcu_unlock(pthread_mutex_t *pmp)
 {
-	if (pthread_mutex_unlock(pmp) != 0) {
-		perror("pthread_mutex_unlock");
-		exit(-1);
-	}
+	int ret;
+
+	ret = pthread_mutex_unlock(pmp);
+	if (ret)
+		urcu_die(ret);
 }
 
+/*
+ * Periodically retry setting CPU affinity if we migrate.
+ * Losing affinity can be caused by CPU hotunplug/hotplug, or by
+ * cpuset(7).
+ */
 #if HAVE_SCHED_SETAFFINITY
 static
 int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 {
 	cpu_set_t mask;
+	int ret;
 
 	if (crdp->cpu_affinity < 0)
 		return 0;
+	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
+		return 0;
+	if (urcu_sched_getcpu() == crdp->cpu_affinity)
+		return 0;
 
 	CPU_ZERO(&mask);
 	CPU_SET(crdp->cpu_affinity, &mask);
 #if SCHED_SETAFFINITY_ARGS == 2
-	return sched_setaffinity(0, &mask);
+	ret = sched_setaffinity(0, &mask);
 #else
-	return sched_setaffinity(0, sizeof(mask), &mask);
+	ret = sched_setaffinity(0, sizeof(mask), &mask);
 #endif
+	/*
+	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
+	 * cpuset(7). This is why we should always retry if we detect
+	 * migration.
+	 */
+	if (ret && errno == EINVAL) {
+		ret = 0;
+		errno = 0;
+	}
+	return ret;
 }
 #else
 static
@@ -183,19 +235,33 @@ static void call_rcu_wait(struct call_rcu_data *crdp)
 {
 	/* Read call_rcu list before read futex */
 	cmm_smp_mb();
-	if (uatomic_read(&crdp->futex) == -1)
-		futex_async(&crdp->futex, FUTEX_WAIT, -1,
-			NULL, NULL, 0);
+	if (uatomic_read(&crdp->futex) != -1)
+		return;
+	while (futex_async(&crdp->futex, FUTEX_WAIT, -1,
+			NULL, NULL, 0)) {
+		switch (errno) {
+		case EWOULDBLOCK:
+			/* Value already changed. */
+			return;
+		case EINTR:
+			/* Retry if interrupted by signal. */
+			break;	/* Get out of switch. */
+		default:
+			/* Unexpected error. */
+			urcu_die(errno);
+		}
+	}
 }
 
 static void call_rcu_wake_up(struct call_rcu_data *crdp)
 {
 	/* Write to call_rcu list before reading/writing futex */
 	cmm_smp_mb();
-	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
 		uatomic_set(&crdp->futex, 0);
-		futex_async(&crdp->futex, FUTEX_WAKE, 1,
-			NULL, NULL, 0);
+		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
+				NULL, NULL, 0) < 0)
+			urcu_die(errno);
 	}
 }
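call_rcu_wait()'s retry loop above pairs with call_rcu_wake_up() and with the uatomic_dec() the worker thread performs before sleeping (visible in the next hunk). Informally, the handshake between the two sides looks like this (editorial sketch, assuming the futex word starts at 0):

/*
 * Informal sketch of the wait/wake handshake (not part of the patch):
 *
 *   worker (call_rcu thread)                waker (call_rcu())
 *   ------------------------                ------------------
 *   uatomic_dec(&crdp->futex);  (now -1)    enqueue callback
 *   cmm_smp_mb();                           cmm_smp_mb();
 *   re-check callback queue;                if (uatomic_read(&crdp->futex)
 *   futex_async(FUTEX_WAIT, -1, ...);           == -1) {
 *                                               uatomic_set(&crdp->futex, 0);
 *                                               futex_async(FUTEX_WAKE, 1, ...);
 *                                           }
 *
 * FUTEX_WAIT fails with EWOULDBLOCK when the waker has already stored 0
 * (nothing to sleep on) and with EINTR on signal delivery; both cases
 * are handled by the retry loop, and any other error is fatal through
 * urcu_die().
 */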
@@ -210,23 +276,41 @@ static void *call_rcu_thread(void *arg)
 	struct rcu_head *rhp;
 	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
 
-	if (set_thread_cpu_affinity(crdp) != 0) {
-		perror("pthread_setaffinity_np");
-		exit(-1);
-	}
+	if (set_thread_cpu_affinity(crdp))
+		urcu_die(errno);
 
 	/*
 	 * If callbacks take a read-side lock, we need to be registered.
 	 */
 	rcu_register_thread();
 
-	thread_call_rcu_data = crdp;
+	URCU_TLS(thread_call_rcu_data) = crdp;
 	if (!rt) {
 		uatomic_dec(&crdp->futex);
 		/* Decrement futex before reading call_rcu list */
 		cmm_smp_mb();
 	}
 	for (;;) {
+		if (set_thread_cpu_affinity(crdp))
+			urcu_die(errno);
+
+		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
+			/*
+			 * Pause requested. Become quiescent: remove
+			 * ourself from all global lists, and don't
+			 * process any callback. The callback lists may
+			 * still be non-empty though.
+			 */
+			rcu_unregister_thread();
+			cmm_smp_mb__before_uatomic_or();
+			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
+			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
+				poll(NULL, 0, 1);
+			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
+			cmm_smp_mb__after_uatomic_and();
+			rcu_register_thread();
+		}
+
 		if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
 			while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
 				poll(NULL, 0, 1);
@@ -295,12 +379,11 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 				int cpu_affinity)
 {
 	struct call_rcu_data *crdp;
+	int ret;
 
 	crdp = malloc(sizeof(*crdp));
-	if (crdp == NULL) {
-		fprintf(stderr, "Out of memory.\n");
-		exit(-1);
-	}
+	if (crdp == NULL)
+		urcu_die(errno);
 	memset(crdp, '\0', sizeof(*crdp));
 	cds_wfq_init(&crdp->cbs);
 	crdp->qlen = 0;
@@ -308,26 +391,31 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 	crdp->flags = flags;
 	cds_list_add(&crdp->list, &call_rcu_data_list);
 	crdp->cpu_affinity = cpu_affinity;
+	crdp->gp_count = 0;
 	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
 	*crdpp = crdp;
-	if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
-		perror("pthread_create");
-		exit(-1);
-	}
+	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
+	if (ret)
+		urcu_die(ret);
 }
 
 /*
  * Return a pointer to the call_rcu_data structure for the specified
  * CPU, returning NULL if there is none.  We cannot automatically
- * create it because the platform we are running on might not define
- * sched_getcpu().
+ * create it because the platform we are running on might not define
+ * urcu_sched_getcpu().
+ *
+ * The call to this function and use of the returned call_rcu_data
+ * should be protected by RCU read-side lock.
 */
 struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
 {
 	static int warned = 0;
+	struct call_rcu_data **pcpu_crdp;
 
-	if (per_cpu_call_rcu_data == NULL)
+	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
+	if (pcpu_crdp == NULL)
 		return NULL;
 	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
 		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
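call_rcu_data_init() is reached through public constructors such as create_call_rcu_data() (its signature is visible in a later hunk of this patch). A usage sketch — the function below is hypothetical, and no NULL check is shown because initialization failures now terminate through urcu_die():

#include <urcu.h>	/* Assumed to pull in the call_rcu API in this tree. */

/*
 * Sketch only: give the calling thread a dedicated call_rcu worker
 * bound to CPU 0. A cpu_affinity of -1 would request no binding, and
 * flags may include URCU_CALL_RCU_RT.
 */
static void setup_my_call_rcu_thread(void)
{
	struct call_rcu_data *crdp;

	crdp = create_call_rcu_data(0 /* flags */, 0 /* cpu_affinity */);
	set_thread_call_rcu_data(crdp);
}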
@@ -335,7 +423,7 @@ struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
 	}
 	if (cpu < 0 || maxcpus <= cpu)
 		return NULL;
-	return per_cpu_call_rcu_data[cpu];
+	return rcu_dereference(pcpu_crdp[cpu]);
 }
 
 /*
@@ -379,6 +467,10 @@ struct call_rcu_data *create_call_rcu_data(unsigned long flags,
  * the caller's responsibility to dispose of the removed structure.
  * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
  * (prior to NULLing it out, of course).
+ *
+ * The caller must wait for a grace period to pass between return from
+ * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
+ * previous call_rcu data as argument.
  */
 int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
 {
@@ -409,7 +501,7 @@ int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
 		return -EEXIST;
 	}
 
-	per_cpu_call_rcu_data[cpu] = crdp;
+	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
 	call_rcu_unlock(&call_rcu_mutex);
 	return 0;
 }
@@ -441,16 +533,19 @@ struct call_rcu_data *get_default_call_rcu_data(void)
  * structure assigned to the CPU on which the thread is running,
  * followed by the default call_rcu_data structure.  If there is not
  * yet a default call_rcu_data structure, one will be created.
+ *
+ * Calls to this function and use of the returned call_rcu_data should
+ * be protected by RCU read-side lock.
  */
 struct call_rcu_data *get_call_rcu_data(void)
 {
 	struct call_rcu_data *crd;
 
-	if (thread_call_rcu_data != NULL)
-		return thread_call_rcu_data;
+	if (URCU_TLS(thread_call_rcu_data) != NULL)
+		return URCU_TLS(thread_call_rcu_data);
 
 	if (maxcpus > 0) {
-		crd = get_cpu_call_rcu_data(sched_getcpu());
+		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
 		if (crd)
 			return crd;
 	}
@@ -464,7 +559,7 @@ struct call_rcu_data *get_call_rcu_data(void)
 
 struct call_rcu_data *get_thread_call_rcu_data(void)
 {
-	return thread_call_rcu_data;
+	return URCU_TLS(thread_call_rcu_data);
 }
 
 /*
@@ -480,7 +575,7 @@ struct call_rcu_data *get_thread_call_rcu_data(void)
 
 void set_thread_call_rcu_data(struct call_rcu_data *crdp)
 {
-	thread_call_rcu_data = crdp;
+	URCU_TLS(thread_call_rcu_data) = crdp;
 }
 
 /*
@@ -555,6 +650,8 @@ static void wake_call_rcu_thread(struct call_rcu_data *crdp)
  * need the first invocation of call_rcu() to be fast, make sure
  * to create a call_rcu thread first.  One way to accomplish this is
  * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
+ *
+ * call_rcu must be called by registered RCU read-side threads.
  */
 void call_rcu(struct rcu_head *head,
@@ -564,10 +661,13 @@ void call_rcu(struct rcu_head *head,
 
 	cds_wfq_node_init(&head->next);
 	head->func = func;
+	/* Holding rcu read-side lock across use of per-cpu crdp */
+	_rcu_read_lock();
 	crdp = get_call_rcu_data();
 	cds_wfq_enqueue(&crdp->cbs, &head->next);
 	uatomic_inc(&crdp->qlen);
 	wake_call_rcu_thread(crdp);
+	_rcu_read_unlock();
 }
 
 /*
@@ -587,6 +687,10 @@ void call_rcu(struct rcu_head *head,
  *
  * We also silently refuse to free NULL pointers.  This simplifies
  * the calling code.
+ *
+ * The caller must wait for a grace period to pass between return from
+ * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
+ * previous call_rcu data as argument.
  */
 void call_rcu_data_free(struct call_rcu_data *crdp)
 {
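The grace-period requirement stated twice above can be met the same way free_all_cpu_call_rcu_data() meets it in a later hunk: detach the structure, call synchronize_rcu(), then free. A sketch (the function below is hypothetical and not part of the patch):

/*
 * Sketch only: swap out the call_rcu_data of one CPU. After the old
 * pointer is unpublished, synchronize_rcu() waits out any call_rcu()
 * caller still reading it from within its read-side critical section,
 * after which freeing it is safe.
 */
static void replace_cpu_call_rcu_data(int cpu, struct call_rcu_data *new_crdp)
{
	struct call_rcu_data *old_crdp;

	rcu_read_lock();
	old_crdp = get_cpu_call_rcu_data(cpu);
	rcu_read_unlock();

	if (set_cpu_call_rcu_data(cpu, NULL))
		return;	/* e.g. -EINVAL or -ENOMEM. */
	if (new_crdp && set_cpu_call_rcu_data(cpu, new_crdp))
		return;
	if (old_crdp) {
		/* Wait for call_rcu() readers still using old_crdp. */
		synchronize_rcu();
		call_rcu_data_free(old_crdp);
	}
}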
@@ -609,15 +713,21 @@
 		_CMM_STORE_SHARED(crdp->cbs.head, NULL);
 		cbs_tail = (struct cds_wfq_node **)
 			uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
+		/* Create default call rcu data if need be */
+		(void) get_default_call_rcu_data();
 		cbs_endprev = (struct cds_wfq_node **)
-			uatomic_xchg(&default_call_rcu_data, cbs_tail);
-		*cbs_endprev = cbs;
+			uatomic_xchg(&default_call_rcu_data->cbs.tail,
+					cbs_tail);
+		_CMM_STORE_SHARED(*cbs_endprev, cbs);
 		uatomic_add(&default_call_rcu_data->qlen,
 			uatomic_read(&crdp->qlen));
 		wake_call_rcu_thread(default_call_rcu_data);
 	}
 
+	call_rcu_lock(&call_rcu_mutex);
 	cds_list_del(&crdp->list);
+	call_rcu_unlock(&call_rcu_mutex);
+
 	free(crdp);
 }
 
@@ -627,27 +737,61 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 void free_all_cpu_call_rcu_data(void)
 {
 	int cpu;
-	struct call_rcu_data *crdp;
+	struct call_rcu_data **crdp;
+	static int warned = 0;
 
 	if (maxcpus <= 0)
 		return;
+
+	crdp = malloc(sizeof(*crdp) * maxcpus);
+	if (!crdp) {
+		if (!warned) {
+			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
+		}
+		warned = 1;
+		return;
+	}
+
 	for (cpu = 0; cpu < maxcpus; cpu++) {
-		crdp = get_cpu_call_rcu_data(cpu);
-		if (crdp == NULL)
+		crdp[cpu] = get_cpu_call_rcu_data(cpu);
+		if (crdp[cpu] == NULL)
 			continue;
 		set_cpu_call_rcu_data(cpu, NULL);
-		call_rcu_data_free(crdp);
 	}
+	/*
+	 * Wait for call_rcu sites acting as RCU readers of the
+	 * call_rcu_data to become quiescent.
+	 */
+	synchronize_rcu();
+	for (cpu = 0; cpu < maxcpus; cpu++) {
+		if (crdp[cpu] == NULL)
+			continue;
+		call_rcu_data_free(crdp[cpu]);
+	}
+	free(crdp);
 }
 
 /*
  * Acquire the call_rcu_mutex in order to ensure that the child sees
- * all of the call_rcu() data structures in a consistent state.
+ * all of the call_rcu() data structures in a consistent state.  Ensure
+ * that all call_rcu threads are in a quiescent state across fork.
  * Suitable for pthread_atfork() and friends.
  */
 void call_rcu_before_fork(void)
 {
+	struct call_rcu_data *crdp;
+
 	call_rcu_lock(&call_rcu_mutex);
+
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
+		cmm_smp_mb__after_uatomic_or();
+		wake_call_rcu_thread(crdp);
+	}
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
+			poll(NULL, 0, 1);
+	}
 }
 
 /*
@@ -657,6 +801,14 @@ void call_rcu_before_fork(void)
  */
 void call_rcu_after_fork_parent(void)
 {
+	struct call_rcu_data *crdp;
+
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
+		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
+			poll(NULL, 0, 1);
+	}
 	call_rcu_unlock(&call_rcu_mutex);
 }
 
/*
@@ -672,6 +824,10 @@ void call_rcu_after_fork_child(void)
 	/* Release the mutex. */
 	call_rcu_unlock(&call_rcu_mutex);
 
+	/* Do nothing when call_rcu() has not been used */
+	if (cds_list_empty(&call_rcu_data_list))
+		return;
+
 	/*
 	 * Allocate a new default call_rcu_data structure in order
 	 * to get a working call_rcu thread to go with it.
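The three fork handlers above and below are designed to be installed together, so that every call_rcu worker is parked in the PAUSED state while fork() runs. A registration sketch (the init function is hypothetical; the three handler names are the real API):

#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>

/* Sketch only: run once at process start, before the first fork(). */
static void my_process_init(void)
{
	int ret;

	ret = pthread_atfork(call_rcu_before_fork,
			call_rcu_after_fork_parent,
			call_rcu_after_fork_child);
	if (ret)
		abort();	/* Application-level policy; liburcu itself uses urcu_die(). */
}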
@@ -679,7 +835,17 @@
 	default_call_rcu_data = NULL;
 	(void)get_default_call_rcu_data();
 
-	/* Dispose of all of the rest of the call_rcu_data structures. */
+	/* Clean up call_rcu_data pointers before use */
+	maxcpus_reset();
+	free(per_cpu_call_rcu_data);
+	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
+	URCU_TLS(thread_call_rcu_data) = NULL;
+
+	/*
+	 * Dispose of all of the rest of the call_rcu_data structures.
+	 * Leftover call_rcu callbacks will be merged into the new
+	 * default call_rcu thread queue.
+	 */
 	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
 		if (crdp == default_call_rcu_data)
 			continue;
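Finally, the requirement added earlier in this patch — "call_rcu must be called by registered RCU read-side threads" — means a typical caller looks like the following end-to-end sketch (type and function names other than the liburcu API are hypothetical):

#include <stdlib.h>
#include <urcu.h>

struct mynode {
	int value;
	struct rcu_head rcu_head;	/* Embedded callback handle. */
};

/* Reclaim callback, invoked by the call_rcu worker after a grace period. */
static void free_mynode(struct rcu_head *head)
{
	struct mynode *node = caa_container_of(head, struct mynode, rcu_head);

	free(node);
}

static void example_thread(struct mynode *node)
{
	rcu_register_thread();	/* Makes this thread a valid call_rcu() caller. */
	call_rcu(&node->rcu_head, free_mynode);
	rcu_unregister_thread();
}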