X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=urcu-call-rcu-impl.h;h=c0298d24b6a732ba7979fa3aa740b4085e1ccb45;hb=ccdbf780e4c751ac1763eeb82b50ff98e0dd33d0;hp=8ed2ab384f81c0806d7c40a74430eb527ad825de;hpb=bd252a04bbbb163aa4d8864b1e1e5a3a4d9d0892;p=userspace-rcu.git diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h index 8ed2ab3..c0298d2 100644 --- a/urcu-call-rcu-impl.h +++ b/urcu-call-rcu-impl.h @@ -41,6 +41,7 @@ #include "urcu/list.h" #include "urcu/futex.h" #include "urcu/tls-compat.h" +#include "urcu-die.h" /* Data structure that identifies a call_rcu thread. */ @@ -65,8 +66,9 @@ CDS_LIST_HEAD(call_rcu_data_list); static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data); -/* Guard call_rcu thread creation. */ - +/* + * Guard call_rcu thread creation and atfork handlers. + */ static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER; /* If a given thread does not have its own call_rcu thread, this is default. */ @@ -79,7 +81,23 @@ static struct call_rcu_data *default_call_rcu_data; * CPUs rather than only to specific threads. */ -#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) +#ifdef HAVE_SCHED_GETCPU + +static int urcu_sched_getcpu(void) +{ + return sched_getcpu(); +} + +#else /* #ifdef HAVE_SCHED_GETCPU */ + +static int urcu_sched_getcpu(void) +{ + return -1; +} + +#endif /* #else #ifdef HAVE_SCHED_GETCPU */ + +#if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) /* * Pointer to array of pointers to per-CPU call_rcu_data structures @@ -122,7 +140,7 @@ static void alloc_cpu_call_rcu_data(void) } } -#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */ +#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */ /* * per_cpu_call_rcu_data should be constant, but some functions below, used both @@ -140,31 +158,28 @@ static void alloc_cpu_call_rcu_data(void) { } -static int sched_getcpu(void) -{ - return -1; -} - -#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */ +#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */ /* Acquire the specified pthread mutex. */ static void call_rcu_lock(pthread_mutex_t *pmp) { - if (pthread_mutex_lock(pmp) != 0) { - perror("pthread_mutex_lock"); - exit(-1); - } + int ret; + + ret = pthread_mutex_lock(pmp); + if (ret) + urcu_die(ret); } /* Release the specified pthread mutex. */ static void call_rcu_unlock(pthread_mutex_t *pmp) { - if (pthread_mutex_unlock(pmp) != 0) { - perror("pthread_mutex_unlock"); - exit(-1); - } + int ret; + + ret = pthread_mutex_unlock(pmp); + if (ret) + urcu_die(ret); } #if HAVE_SCHED_SETAFFINITY @@ -222,11 +237,11 @@ static void *call_rcu_thread(void *arg) struct call_rcu_data *crdp = (struct call_rcu_data *)arg; struct rcu_head *rhp; int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT); + int ret; - if (set_thread_cpu_affinity(crdp) != 0) { - perror("pthread_setaffinity_np"); - exit(-1); - } + ret = set_thread_cpu_affinity(crdp); + if (ret) + urcu_die(errno); /* * If callbacks take a read-side lock, we need to be registered. @@ -240,6 +255,23 @@ static void *call_rcu_thread(void *arg) cmm_smp_mb(); } for (;;) { + if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) { + /* + * Pause requested. Become quiescent: remove + * ourself from all global lists, and don't + * process any callback. The callback lists may + * still be non-empty though. 
+ */ + rcu_unregister_thread(); + cmm_smp_mb__before_uatomic_or(); + uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED); + while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0) + poll(NULL, 0, 1); + uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED); + cmm_smp_mb__after_uatomic_and(); + rcu_register_thread(); + } + if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) { while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL) poll(NULL, 0, 1); @@ -308,12 +340,11 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp, int cpu_affinity) { struct call_rcu_data *crdp; + int ret; crdp = malloc(sizeof(*crdp)); - if (crdp == NULL) { - fprintf(stderr, "Out of memory.\n"); - exit(-1); - } + if (crdp == NULL) + urcu_die(errno); memset(crdp, '\0', sizeof(*crdp)); cds_wfq_init(&crdp->cbs); crdp->qlen = 0; @@ -323,17 +354,16 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp, crdp->cpu_affinity = cpu_affinity; cmm_smp_mb(); /* Structure initialized before pointer is planted. */ *crdpp = crdp; - if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) { - perror("pthread_create"); - exit(-1); - } + ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp); + if (ret) + urcu_die(ret); } /* * Return a pointer to the call_rcu_data structure for the specified * CPU, returning NULL if there is none. We cannot automatically * created it because the platform we are running on might not define - * sched_getcpu(). + * urcu_sched_getcpu(). * * The call to this function and use of the returned call_rcu_data * should be protected by RCU read-side lock. @@ -475,7 +505,7 @@ struct call_rcu_data *get_call_rcu_data(void) return URCU_TLS(thread_call_rcu_data); if (maxcpus > 0) { - crd = get_cpu_call_rcu_data(sched_getcpu()); + crd = get_cpu_call_rcu_data(urcu_sched_getcpu()); if (crd) return crd; } @@ -646,8 +676,9 @@ void call_rcu_data_free(struct call_rcu_data *crdp) /* Create default call rcu data if need be */ (void) get_default_call_rcu_data(); cbs_endprev = (struct cds_wfq_node **) - uatomic_xchg(&default_call_rcu_data, cbs_tail); - *cbs_endprev = cbs; + uatomic_xchg(&default_call_rcu_data->cbs.tail, + cbs_tail); + _CMM_STORE_SHARED(*cbs_endprev, cbs); uatomic_add(&default_call_rcu_data->qlen, uatomic_read(&crdp->qlen)); wake_call_rcu_thread(default_call_rcu_data); @@ -702,12 +733,25 @@ void free_all_cpu_call_rcu_data(void) /* * Acquire the call_rcu_mutex in order to ensure that the child sees - * all of the call_rcu() data structures in a consistent state. + * all of the call_rcu() data structures in a consistent state. Ensure + * that all call_rcu threads are in a quiescent state across fork. * Suitable for pthread_atfork() and friends. 
*/ void call_rcu_before_fork(void) { + struct call_rcu_data *crdp; + call_rcu_lock(&call_rcu_mutex); + + cds_list_for_each_entry(crdp, &call_rcu_data_list, list) { + uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE); + cmm_smp_mb__after_uatomic_or(); + wake_call_rcu_thread(crdp); + } + cds_list_for_each_entry(crdp, &call_rcu_data_list, list) { + while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0) + poll(NULL, 0, 1); + } } /* @@ -717,6 +761,14 @@ void call_rcu_before_fork(void) */ void call_rcu_after_fork_parent(void) { + struct call_rcu_data *crdp; + + cds_list_for_each_entry(crdp, &call_rcu_data_list, list) + uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE); + cds_list_for_each_entry(crdp, &call_rcu_data_list, list) { + while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0) + poll(NULL, 0, 1); + } call_rcu_unlock(&call_rcu_mutex); } @@ -749,7 +801,11 @@ void call_rcu_after_fork_child(void) rcu_set_pointer(&per_cpu_call_rcu_data, NULL); URCU_TLS(thread_call_rcu_data) = NULL; - /* Dispose of all of the rest of the call_rcu_data structures. */ + /* + * Dispose of all of the rest of the call_rcu_data structures. + * Leftover call_rcu callbacks will be merged into the new + * default call_rcu thread queue. + */ cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) { if (crdp == default_call_rcu_data) continue;
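
Usage note (not part of the diff above): the URCU_CALL_RCU_PAUSE/URCU_CALL_RCU_PAUSED handshake introduced by this patch only protects fork() if call_rcu_before_fork(), call_rcu_after_fork_parent() and call_rcu_after_fork_child() are actually installed as fork handlers, as the comment above suggests ("Suitable for pthread_atfork() and friends"). Below is a minimal application-side sketch of such a setup; it is an illustration only, not part of the patch. The header name and the helper function name are assumptions and may need adjusting to the headers installed by the liburcu version in use.

#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>		/* default urcu flavour */
#include <urcu-call-rcu.h>	/* assumed header declaring call_rcu_before_fork() and friends */

/*
 * Illustrative helper (hypothetical, not part of this patch): install
 * the call_rcu fork handlers once, early in main(), so that every
 * call_rcu worker thread is paused (quiescent) across fork(), as
 * implemented by the pause handshake in the patch above.
 */
static void install_call_rcu_fork_handlers(void)
{
	int ret;

	ret = pthread_atfork(call_rcu_before_fork,
			call_rcu_after_fork_parent,
			call_rcu_after_fork_child);
	if (ret)
		abort();	/* forking without the handlers is unsafe */
}

With the handlers registered, a child produced by fork() inherits no running call_rcu worker threads in an inconsistent state: the parent's workers are quiescent at fork time, the parent resumes them afterwards, and the child rebuilds its own default call_rcu thread, merging any leftover callbacks as described in the call_rcu_after_fork_child() hunk above.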