X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-call-rcu-impl.h;h=f6625ba14e2d05689443caec171605c13b120a1f;hp=29c4e28e334a0654f98725392229dbe98b07645a;hb=ffa11a1830c532f3b052146eb9f0dd450cb2a0f2;hpb=d31150b44042083fa11edcdc788b0c60d71367a7

diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index 29c4e28..f6625ba 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -21,6 +21,7 @@
  */
 
 #define _GNU_SOURCE
+#define _LGPL_SOURCE
 #include 
 #include 
 #include 
@@ -35,16 +36,26 @@
 #include 
 
 #include "config.h"
-#include "urcu/wfqueue.h"
+#include "urcu/wfcqueue.h"
 #include "urcu-call-rcu.h"
 #include "urcu-pointer.h"
 #include "urcu/list.h"
 #include "urcu/futex.h"
+#include "urcu/tls-compat.h"
+#include "urcu-die.h"
 
 /* Data structure that identifies a call_rcu thread. */
 
 struct call_rcu_data {
-	struct cds_wfq_queue cbs;
+	/*
+	 * We do not align head on a different cache-line than tail
+	 * mainly because call_rcu callback-invocation threads use
+	 * batching ("splice") to get an entire list of callbacks, which
+	 * effectively empties the queue, and requires to touch the tail
+	 * anyway.
+	 */
+	struct cds_wfcq_tail cbs_tail;
+	struct cds_wfcq_head cbs_head;
 	unsigned long flags;
 	int32_t futex;
 	unsigned long qlen; /* maintained for debugging. */
@@ -58,14 +69,15 @@ struct call_rcu_data {
  * Protected by call_rcu_mutex.
  */
 
-CDS_LIST_HEAD(call_rcu_data_list);
+static CDS_LIST_HEAD(call_rcu_data_list);
 
 /* Link a thread using call_rcu() to its call_rcu thread. */
 
-static __thread struct call_rcu_data *thread_call_rcu_data;
-
-/* Guard call_rcu thread creation. */
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
+/*
+ * Guard call_rcu thread creation and atfork handlers.
+ */
 
 static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 /* If a given thread does not have its own call_rcu thread, this is default. */
@@ -150,20 +162,22 @@ static int sched_getcpu(void)
 
 static void call_rcu_lock(pthread_mutex_t *pmp)
 {
-	if (pthread_mutex_lock(pmp) != 0) {
-		perror("pthread_mutex_lock");
-		exit(-1);
-	}
+	int ret;
+
+	ret = pthread_mutex_lock(pmp);
+	if (ret)
+		urcu_die(ret);
 }
 
 /* Release the specified pthread mutex. */
 
 static void call_rcu_unlock(pthread_mutex_t *pmp)
 {
-	if (pthread_mutex_unlock(pmp) != 0) {
-		perror("pthread_mutex_unlock");
-		exit(-1);
-	}
+	int ret;
+
+	ret = pthread_mutex_unlock(pmp);
+	if (ret)
+		urcu_die(ret);
 }
 
 #if HAVE_SCHED_SETAFFINITY
@@ -204,7 +218,7 @@ static void call_rcu_wake_up(struct call_rcu_data *crdp)
 {
 	/* Write to call_rcu list before reading/writing futex */
 	cmm_smp_mb();
-	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
 		uatomic_set(&crdp->futex, 0);
 		futex_async(&crdp->futex, FUTEX_WAKE, 1,
 			NULL, NULL, 0);
@@ -216,58 +230,71 @@ static void call_rcu_wake_up(struct call_rcu_data *crdp)
 static void *call_rcu_thread(void *arg)
 {
 	unsigned long cbcount;
-	struct cds_wfq_node *cbs;
-	struct cds_wfq_node **cbs_tail;
-	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
-	struct rcu_head *rhp;
+	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
 	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
+	int ret;
 
-	if (set_thread_cpu_affinity(crdp) != 0) {
-		perror("pthread_setaffinity_np");
-		exit(-1);
-	}
+	ret = set_thread_cpu_affinity(crdp);
+	if (ret)
+		urcu_die(errno);
 
 	/*
 	 * If callbacks take a read-side lock, we need to be registered.
 	 */
 	rcu_register_thread();
 
-	thread_call_rcu_data = crdp;
+	URCU_TLS(thread_call_rcu_data) = crdp;
 	if (!rt) {
 		uatomic_dec(&crdp->futex);
 		/* Decrement futex before reading call_rcu list */
 		cmm_smp_mb();
 	}
 	for (;;) {
-		if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
-			while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
+		struct cds_wfcq_head cbs_tmp_head;
+		struct cds_wfcq_tail cbs_tmp_tail;
+		struct cds_wfcq_node *cbs, *cbs_tmp_n;
+		enum cds_wfcq_ret splice_ret;
+
+		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
+			/*
+			 * Pause requested. Become quiescent: remove
+			 * ourself from all global lists, and don't
+			 * process any callback. The callback lists may
+			 * still be non-empty though.
+			 */
+			rcu_unregister_thread();
+			cmm_smp_mb__before_uatomic_or();
+			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
+			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
 				poll(NULL, 0, 1);
-			_CMM_STORE_SHARED(crdp->cbs.head, NULL);
-			cbs_tail = (struct cds_wfq_node **)
-				uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
+			rcu_register_thread();
+		}
+
+		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
+		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
+			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
+		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
+		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
+		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
 			synchronize_rcu();
 			cbcount = 0;
-			do {
-				while (cbs->next == NULL &&
-				       &cbs->next != cbs_tail)
-					poll(NULL, 0, 1);
-				if (cbs == &crdp->cbs.dummy) {
-					cbs = cbs->next;
-					continue;
-				}
-				rhp = (struct rcu_head *)cbs;
-				cbs = cbs->next;
+			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
+					&cbs_tmp_tail, cbs, cbs_tmp_n) {
+				struct rcu_head *rhp;
+
+				rhp = caa_container_of(cbs,
					struct rcu_head, next);
 				rhp->func(rhp);
 				cbcount++;
-			} while (cbs != NULL);
+			}
 			uatomic_sub(&crdp->qlen, cbcount);
 		}
 		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
 			break;
 		rcu_thread_offline();
 		if (!rt) {
-			if (&crdp->cbs.head
-			    == _CMM_LOAD_SHARED(crdp->cbs.tail)) {
+			if (cds_wfcq_empty(&crdp->cbs_head,
+					&crdp->cbs_tail)) {
 				call_rcu_wait(crdp);
 				poll(NULL, 0, 10);
 				uatomic_dec(&crdp->futex);
@@ -307,14 +334,13 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 			       int cpu_affinity)
 {
 	struct call_rcu_data *crdp;
+	int ret;
 
 	crdp = malloc(sizeof(*crdp));
-	if (crdp == NULL) {
-		fprintf(stderr, "Out of memory.\n");
-		exit(-1);
-	}
+	if (crdp == NULL)
+		urcu_die(errno);
 	memset(crdp, '\0', sizeof(*crdp));
-	cds_wfq_init(&crdp->cbs);
+	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
 	crdp->qlen = 0;
 	crdp->futex = 0;
 	crdp->flags = flags;
@@ -322,10 +348,9 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 	crdp->cpu_affinity = cpu_affinity;
 	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
 	*crdpp = crdp;
-	if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
-		perror("pthread_create");
-		exit(-1);
-	}
+	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
+	if (ret)
+		urcu_die(ret);
 }
 
 /*
@@ -396,6 +421,10 @@ struct call_rcu_data *create_call_rcu_data(unsigned long flags,
  * the caller's responsibility to dispose of the removed structure.
  * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
  * (prior to NULLing it out, of course).
+ *
+ * The caller must wait for a grace-period to pass between return from
+ * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
+ * previous call rcu data as argument.
  */
 int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
 {
@@ -466,8 +495,8 @@ struct call_rcu_data *get_call_rcu_data(void)
 {
 	struct call_rcu_data *crd;
 
-	if (thread_call_rcu_data != NULL)
-		return thread_call_rcu_data;
+	if (URCU_TLS(thread_call_rcu_data) != NULL)
+		return URCU_TLS(thread_call_rcu_data);
 
 	if (maxcpus > 0) {
 		crd = get_cpu_call_rcu_data(sched_getcpu());
@@ -484,7 +513,7 @@ struct call_rcu_data *get_call_rcu_data(void)
 
 struct call_rcu_data *get_thread_call_rcu_data(void)
 {
-	return thread_call_rcu_data;
+	return URCU_TLS(thread_call_rcu_data);
 }
 
 /*
@@ -500,7 +529,7 @@ struct call_rcu_data *get_thread_call_rcu_data(void)
 
 void set_thread_call_rcu_data(struct call_rcu_data *crdp)
 {
-	thread_call_rcu_data = crdp;
+	URCU_TLS(thread_call_rcu_data) = crdp;
 }
 
 /*
@@ -584,12 +613,12 @@ void call_rcu(struct rcu_head *head,
 {
 	struct call_rcu_data *crdp;
 
-	cds_wfq_node_init(&head->next);
+	cds_wfcq_node_init(&head->next);
 	head->func = func;
 	/* Holding rcu read-side lock across use of per-cpu crdp */
 	rcu_read_lock();
 	crdp = get_call_rcu_data();
-	cds_wfq_enqueue(&crdp->cbs, &head->next);
+	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
 	uatomic_inc(&crdp->qlen);
 	wake_call_rcu_thread(crdp);
 	rcu_read_unlock();
@@ -612,13 +641,17 @@ void call_rcu(struct rcu_head *head,
  *
  * We also silently refuse to free NULL pointers. This simplifies
  * the calling code.
+ *
+ * The caller must wait for a grace-period to pass between return from
+ * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
+ * previous call rcu data as argument.
+ *
+ * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
+ * a list corruption bug in the 0.7.x series. The equivalent fix
+ * appeared in 0.6.8 for the stable-0.6 branch.
  */
 void call_rcu_data_free(struct call_rcu_data *crdp)
 {
-	struct cds_wfq_node *cbs;
-	struct cds_wfq_node **cbs_tail;
-	struct cds_wfq_node **cbs_endprev;
-
 	if (crdp == NULL || crdp == default_call_rcu_data) {
 		return;
 	}
@@ -628,17 +661,12 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
 			poll(NULL, 0, 1);
 	}
-	if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
-		while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
-			poll(NULL, 0, 1);
-		_CMM_STORE_SHARED(crdp->cbs.head, NULL);
-		cbs_tail = (struct cds_wfq_node **)
-			uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
+	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
 		/* Create default call rcu data if need be */
 		(void) get_default_call_rcu_data();
-		cbs_endprev = (struct cds_wfq_node **)
-			uatomic_xchg(&default_call_rcu_data, cbs_tail);
-		*cbs_endprev = cbs;
+		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
+			&default_call_rcu_data->cbs_tail,
+			&crdp->cbs_head, &crdp->cbs_tail);
 		uatomic_add(&default_call_rcu_data->qlen,
 			uatomic_read(&crdp->qlen));
 		wake_call_rcu_thread(default_call_rcu_data);
@@ -693,12 +721,25 @@ void free_all_cpu_call_rcu_data(void)
 
 /*
  * Acquire the call_rcu_mutex in order to ensure that the child sees
- * all of the call_rcu() data structures in a consistent state.
+ * all of the call_rcu() data structures in a consistent state. Ensure
+ * that all call_rcu threads are in a quiescent state across fork.
  * Suitable for pthread_atfork() and friends.
  */
 void call_rcu_before_fork(void)
 {
+	struct call_rcu_data *crdp;
+
 	call_rcu_lock(&call_rcu_mutex);
+
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
+		cmm_smp_mb__after_uatomic_or();
+		wake_call_rcu_thread(crdp);
+	}
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
+			poll(NULL, 0, 1);
+	}
 }
 
 /*
@@ -708,6 +749,10 @@ void call_rcu_before_fork(void)
  */
 void call_rcu_after_fork_parent(void)
 {
+	struct call_rcu_data *crdp;
+
+	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
+		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
 	call_rcu_unlock(&call_rcu_mutex);
 }
 
@@ -738,9 +783,13 @@ void call_rcu_after_fork_child(void)
 	maxcpus_reset();
 	free(per_cpu_call_rcu_data);
 	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
-	thread_call_rcu_data = NULL;
+	URCU_TLS(thread_call_rcu_data) = NULL;
 
-	/* Dispose of all of the rest of the call_rcu_data structures. */
+	/*
+	 * Dispose of all of the rest of the call_rcu_data structures.
+	 * Leftover call_rcu callbacks will be merged into the new
+	 * default call_rcu thread queue.
+	 */
 	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
 		if (crdp == default_call_rcu_data)
 			continue;
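
As a rough usage sketch (hypothetical, not part of the patch), the program
below shows how an application is expected to drive the code these hunks
touch: it queues a callback with call_rcu(), which now lands on the
wfcqueue-backed callback list, and it installs the fork handlers through
pthread_atfork(), as the "Suitable for pthread_atfork() and friends" comment
suggests. call_rcu(), rcu_register_thread(), rcu_unregister_thread(),
call_rcu_before_fork(), call_rcu_after_fork_parent() and
call_rcu_after_fork_child() are the public liburcu API used or defined in
this file; struct my_node, free_node() and retire_node() are made-up names
for illustration only.

#include <pthread.h>
#include <stdlib.h>

#include <urcu.h>		/* default (memb) flavour; declares call_rcu() */
#include <urcu/compiler.h>	/* caa_container_of() */

struct my_node {
	int value;
	struct rcu_head rcu;	/* embedded node handed to call_rcu() */
};

/* Invoked by the call_rcu worker thread after a grace period. */
static void free_node(struct rcu_head *head)
{
	struct my_node *node = caa_container_of(head, struct my_node, rcu);

	free(node);
}

/* Retire a node: enqueue it on the wait-free callback queue. */
static void retire_node(struct my_node *node)
{
	call_rcu(&node->rcu, free_node);
}

int main(void)
{
	struct my_node *node;

	/*
	 * Pause/resume call_rcu worker threads across fork(), matching
	 * the URCU_CALL_RCU_PAUSE/PAUSED handshake added by this patch.
	 */
	pthread_atfork(call_rcu_before_fork,
		       call_rcu_after_fork_parent,
		       call_rcu_after_fork_child);

	/*
	 * call_rcu() briefly holds the read-side lock, so the calling
	 * thread must be registered with this flavour.
	 */
	rcu_register_thread();

	node = malloc(sizeof(*node));
	if (!node)
		return 1;
	node->value = 42;
	retire_node(node);	/* freed asynchronously after a grace period */

	/*
	 * A real program would drain pending callbacks before exiting,
	 * e.g. with rcu_barrier() where the installed liburcu provides it.
	 */
	rcu_unregister_thread();
	return 0;
}

The loop added to call_rcu_before_fork() waits until every worker thread has
acknowledged the pause (URCU_CALL_RCU_PAUSED set) before fork() is allowed to
proceed, and call_rcu_after_fork_parent() simply clears URCU_CALL_RCU_PAUSE to
let the workers resume.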