X-Git-Url: http://git.liburcu.org/?p=userspace-rcu.git;a=blobdiff_plain;f=urcu-call-rcu-impl.h;h=04f7798c3f57a433a3756f267e0ffdc07e5e4552;hp=182e9b15bd96552ed5e4fb8125bee02da04a8715;hb=f6d1a94fb64e9beeaf4558f12c157784831d9084;hpb=f96d597f3a04010d432c36b9bb07e910720000ab

diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index 182e9b1..04f7798 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -40,6 +40,8 @@
 #include "urcu-pointer.h"
 #include "urcu/list.h"
 #include "urcu/futex.h"
+#include "urcu/tls-compat.h"
+#include "urcu-die.h"
 
 /* Data structure that identifies a call_rcu thread. */
 
@@ -62,10 +64,11 @@ CDS_LIST_HEAD(call_rcu_data_list);
 
 /* Link a thread using call_rcu() to its call_rcu thread. */
 
-static __thread struct call_rcu_data *thread_call_rcu_data;
-
-/* Guard call_rcu thread creation. */
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
 
+/*
+ * Guard call_rcu thread creation and atfork handlers.
+ */
 static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 /* If a given thread does not have its own call_rcu thread, this is default. */
@@ -78,7 +81,23 @@ static struct call_rcu_data *default_call_rcu_data;
  * CPUs rather than only to specific threads.
  */
 
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+#ifdef HAVE_SCHED_GETCPU
+
+static int urcu_sched_getcpu(void)
+{
+	return sched_getcpu();
+}
+
+#else /* #ifdef HAVE_SCHED_GETCPU */
+
+static int urcu_sched_getcpu(void)
+{
+	return -1;
+}
+
+#endif /* #else #ifdef HAVE_SCHED_GETCPU */
+
+#if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU)
 
 /*
  * Pointer to array of pointers to per-CPU call_rcu_data structures
@@ -121,7 +140,7 @@ static void alloc_cpu_call_rcu_data(void)
 	}
 }
 
-#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
 
 /*
  * per_cpu_call_rcu_data should be constant, but some functions below, used both
@@ -139,31 +158,28 @@ static void alloc_cpu_call_rcu_data(void)
 {
 }
 
-static int sched_getcpu(void)
-{
-	return -1;
-}
-
-#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
 
 /* Acquire the specified pthread mutex. */
 
 static void call_rcu_lock(pthread_mutex_t *pmp)
 {
-	if (pthread_mutex_lock(pmp) != 0) {
-		perror("pthread_mutex_lock");
-		exit(-1);
-	}
+	int ret;
+
+	ret = pthread_mutex_lock(pmp);
+	if (ret)
+		urcu_die(ret);
 }
 
 /* Release the specified pthread mutex. */
 
 static void call_rcu_unlock(pthread_mutex_t *pmp)
 {
-	if (pthread_mutex_unlock(pmp) != 0) {
-		perror("pthread_mutex_unlock");
-		exit(-1);
-	}
+	int ret;
+
+	ret = pthread_mutex_unlock(pmp);
+	if (ret)
+		urcu_die(ret);
 }
 
 #if HAVE_SCHED_SETAFFINITY
@@ -204,7 +220,7 @@ static void call_rcu_wake_up(struct call_rcu_data *crdp)
 {
 	/* Write to call_rcu list before reading/writing futex */
 	cmm_smp_mb();
-	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
 		uatomic_set(&crdp->futex, 0);
 		futex_async(&crdp->futex, FUTEX_WAKE, 1, NULL, NULL, 0);
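
The caa_unlikely() hint above covers the common case in which no call_rcu worker is parked on the futex, so the FUTEX_WAKE syscall is usually skipped. For reference, a simplified sketch of the two sides of that handshake, built only from the uatomic and futex_async() primitives this file already uses; the sketch_* names are illustrative, not part of the patch:

#include <stdint.h>
#include <urcu/arch.h>		/* cmm_smp_mb() */
#include <urcu/compiler.h>	/* caa_unlikely() */
#include <urcu/uatomic.h>
#include <urcu/futex.h>		/* futex_async(), FUTEX_WAIT, FUTEX_WAKE */

/*
 * Waiter side (the call_rcu worker): the worker decremented the futex
 * word to -1 before checking for work; park only if it is still -1.
 */
static void sketch_futex_wait(int32_t *futex)
{
	/* Read the callback list before reading the futex word. */
	cmm_smp_mb();
	if (uatomic_read(futex) == -1)
		futex_async(futex, FUTEX_WAIT, -1, NULL, NULL, 0);
}

/*
 * Waker side, mirroring call_rcu_wake_up() above: skip the syscall in
 * the common case where no worker is parked.
 */
static void sketch_futex_wake(int32_t *futex)
{
	/* Write to the callback list before reading/writing the futex word. */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(futex) == -1)) {
		uatomic_set(futex, 0);
		futex_async(futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}

Because the worker publishes -1 before sleeping and the waker resets the word to 0 before issuing FUTEX_WAKE, with full barriers pairing the list accesses against the futex accesses on both sides, a wakeup cannot be lost between the emptiness check and the FUTEX_WAIT.
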
@@ -221,24 +237,39 @@ static void *call_rcu_thread(void *arg)
 	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
 	struct rcu_head *rhp;
 	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
+	int ret;
 
-	if (set_thread_cpu_affinity(crdp) != 0) {
-		perror("pthread_setaffinity_np");
-		exit(-1);
-	}
+	ret = set_thread_cpu_affinity(crdp);
+	if (ret)
+		urcu_die(errno);
 
 	/*
 	 * If callbacks take a read-side lock, we need to be registered.
 	 */
 	rcu_register_thread();
 
-	thread_call_rcu_data = crdp;
+	URCU_TLS(thread_call_rcu_data) = crdp;
 	if (!rt) {
 		uatomic_dec(&crdp->futex);
 		/* Decrement futex before reading call_rcu list */
 		cmm_smp_mb();
 	}
 	for (;;) {
+		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
+			/*
+			 * Pause requested. Become quiescent: remove
+			 * ourself from all global lists, and don't
+			 * process any callback. The callback lists may
+			 * still be non-empty though.
+			 */
+			rcu_unregister_thread();
+			cmm_smp_mb__before_uatomic_or();
+			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
+			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
+				poll(NULL, 0, 1);
+			rcu_register_thread();
+		}
+
 		if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
 			while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
 				poll(NULL, 0, 1);
@@ -307,12 +338,11 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 			  int cpu_affinity)
 {
 	struct call_rcu_data *crdp;
+	int ret;
 
 	crdp = malloc(sizeof(*crdp));
-	if (crdp == NULL) {
-		fprintf(stderr, "Out of memory.\n");
-		exit(-1);
-	}
+	if (crdp == NULL)
+		urcu_die(errno);
 	memset(crdp, '\0', sizeof(*crdp));
 	cds_wfq_init(&crdp->cbs);
 	crdp->qlen = 0;
@@ -322,17 +352,16 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 	crdp->cpu_affinity = cpu_affinity;
 	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
 	*crdpp = crdp;
-	if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
-		perror("pthread_create");
-		exit(-1);
-	}
+	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
+	if (ret)
+		urcu_die(ret);
 }
 
 /*
  * Return a pointer to the call_rcu_data structure for the specified
  * CPU, returning NULL if there is none. We cannot automatically
  * create it because the platform we are running on might not define
- * sched_getcpu().
+ * urcu_sched_getcpu().
  *
  * The call to this function and use of the returned call_rcu_data
  * should be protected by RCU read-side lock.
@@ -470,11 +499,11 @@ struct call_rcu_data *get_call_rcu_data(void)
 {
 	struct call_rcu_data *crd;
 
-	if (thread_call_rcu_data != NULL)
-		return thread_call_rcu_data;
+	if (URCU_TLS(thread_call_rcu_data) != NULL)
+		return URCU_TLS(thread_call_rcu_data);
 
 	if (maxcpus > 0) {
-		crd = get_cpu_call_rcu_data(sched_getcpu());
+		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
 		if (crd)
 			return crd;
 	}
@@ -488,7 +517,7 @@ struct call_rcu_data *get_call_rcu_data(void)
 
 struct call_rcu_data *get_thread_call_rcu_data(void)
 {
-	return thread_call_rcu_data;
+	return URCU_TLS(thread_call_rcu_data);
 }
 
 /*
@@ -504,7 +533,7 @@ struct call_rcu_data *get_thread_call_rcu_data(void)
 
 void set_thread_call_rcu_data(struct call_rcu_data *crdp)
 {
-	thread_call_rcu_data = crdp;
+	URCU_TLS(thread_call_rcu_data) = crdp;
 }
 
 /*
@@ -645,8 +674,9 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 		/* Create default call rcu data if need be */
 		(void) get_default_call_rcu_data();
 		cbs_endprev = (struct cds_wfq_node **)
-			uatomic_xchg(&default_call_rcu_data, cbs_tail);
-		*cbs_endprev = cbs;
+			uatomic_xchg(&default_call_rcu_data->cbs.tail,
+					cbs_tail);
+		_CMM_STORE_SHARED(*cbs_endprev, cbs);
 		uatomic_add(&default_call_rcu_data->qlen,
 			    uatomic_read(&crdp->qlen));
 		wake_call_rcu_thread(default_call_rcu_data);
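
The hunk above fixes a genuine bug in addition to the annotated store: the old code exchanged the global default_call_rcu_data pointer itself rather than the tail pointer of its callback queue, and then published the link with a plain store. The corrected code follows the usual cds_wfq append pattern: atomically swing the destination tail to the end of the spliced chain, then publish the link from the previous last node. A minimal sketch of that pattern, assuming the cds_wfq_queue/cds_wfq_node layout from urcu/wfqueue.h; sketch_splice_chain is an illustrative name, not a library function:

#include <urcu/wfqueue.h>	/* struct cds_wfq_queue, struct cds_wfq_node */
#include <urcu/uatomic.h>
#include <urcu/system.h>	/* _CMM_STORE_SHARED() */

/*
 * Append a whole chain of nodes to a wait-free queue in O(1).
 * chain_head is the first node of the chain; chain_tail points to the
 * next field of its last node (i.e. the source queue's old tail).
 */
static void sketch_splice_chain(struct cds_wfq_queue *dst,
		struct cds_wfq_node *chain_head,
		struct cds_wfq_node **chain_tail)
{
	struct cds_wfq_node **old_tail;

	/* Atomically swing dst's tail past the spliced chain. */
	old_tail = (struct cds_wfq_node **)
		uatomic_xchg(&dst->tail, chain_tail);
	/*
	 * Publish the link from the previous last node. Between the
	 * xchg and this store, a dequeuer can observe a NULL next
	 * pointer and must busy-wait, which is why call_rcu_thread()
	 * polls when it sees a NULL head.
	 */
	_CMM_STORE_SHARED(*old_tail, chain_head);
}
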
@@ -701,12 +731,25 @@ void free_all_cpu_call_rcu_data(void)
 
 /*
  * Acquire the call_rcu_mutex in order to ensure that the child sees
- * all of the call_rcu() data structures in a consistent state.
+ * all of the call_rcu() data structures in a consistent state. Ensure
+ * that all call_rcu threads are in a quiescent state across fork.
  * Suitable for pthread_atfork() and friends.
  */
 void call_rcu_before_fork(void)
 {
+	struct call_rcu_data *crdp;
+
 	call_rcu_lock(&call_rcu_mutex);
+
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
+		cmm_smp_mb__after_uatomic_or();
+		wake_call_rcu_thread(crdp);
+	}
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
+			poll(NULL, 0, 1);
+	}
 }
 
 /*
@@ -716,6 +759,10 @@ void call_rcu_before_fork(void)
  */
 void call_rcu_after_fork_parent(void)
 {
+	struct call_rcu_data *crdp;
+
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
+		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
 	call_rcu_unlock(&call_rcu_mutex);
 }
 
@@ -746,9 +793,13 @@ void call_rcu_after_fork_child(void)
 	maxcpus_reset();
 	free(per_cpu_call_rcu_data);
 	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
-	thread_call_rcu_data = NULL;
+	URCU_TLS(thread_call_rcu_data) = NULL;
 
-	/* Dispose of all of the rest of the call_rcu_data structures. */
+	/*
+	 * Dispose of all of the rest of the call_rcu_data structures.
+	 * Leftover call_rcu callbacks will be merged into the new
+	 * default call_rcu thread queue.
+	 */
 	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
 		if (crdp == default_call_rcu_data)
 			continue;
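
With the pause/resume handshake above in place, an application that calls fork() while using call_rcu() is expected to register these three handlers once at startup. A minimal usage sketch; init_call_rcu_fork_handlers is an illustrative name, and the urcu.h include assumes the default flavor header pulls in the public call_rcu declarations:

#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>	/* call_rcu_before_fork() and friends */

static void init_call_rcu_fork_handlers(void)
{
	int ret;

	/*
	 * before_fork pauses every call_rcu worker and holds
	 * call_rcu_mutex across fork(); the parent handler clears the
	 * pause flags and unlocks, while the child handler resets the
	 * per-CPU state and merges leftover callbacks into a fresh
	 * default queue.
	 */
	ret = pthread_atfork(call_rcu_before_fork,
			call_rcu_after_fork_parent,
			call_rcu_after_fork_child);
	if (ret)
		abort();
}
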