X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-call-rcu-impl.h;h=ae93468d8beb4e9fb9167a4dcc5b462892f3d9a2;hp=9beb58cbf59cfba7d8f82aae5e85fb8882db9e70;hb=53a55535da7db6ac6a2f37107baed9130c049333;hpb=739877210827591639f5448815e5f9186f4b31d2

diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index 9beb58c..ae93468 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -26,11 +26,11 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
@@ -39,15 +39,14 @@
 #include "urcu-call-rcu.h"
 #include "urcu-pointer.h"
 #include "urcu/list.h"
-#include "urcu/urcu-futex.h"
+#include "urcu/futex.h"
 
 /* Data structure that identifies a call_rcu thread. */
 
 struct call_rcu_data {
 	struct cds_wfq_queue cbs;
 	unsigned long flags;
-	pthread_mutex_t mtx;
-	int futex;
+	int32_t futex;
 	unsigned long qlen; /* maintained for debugging. */
 	pthread_t tid;
 	int cpu_affinity;
@@ -89,26 +88,6 @@ static struct call_rcu_data *default_call_rcu_data;
 static struct call_rcu_data **per_cpu_call_rcu_data;
 static long maxcpus;
 
-static void call_rcu_wait(struct call_rcu_data *crdp)
-{
-	/* Read call_rcu list before read futex */
-	cmm_smp_mb();
-	if (uatomic_read(&crdp->futex) == -1)
-		futex_async(&crdp->futex, FUTEX_WAIT, -1,
-			    NULL, NULL, 0);
-}
-
-static void call_rcu_wake_up(struct call_rcu_data *crdp)
-{
-	/* Write to call_rcu list before reading/writing futex */
-	cmm_smp_mb();
-	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
-		uatomic_set(&crdp->futex, 0);
-		futex_async(&crdp->futex, FUTEX_WAKE, 1,
-			    NULL, NULL, 0);
-	}
-}
-
 /* Allocate the array if it has not already been allocated. */
 
 static void alloc_cpu_call_rcu_data(void)
@@ -136,7 +115,12 @@
 
 #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
 
-static const struct call_rcu_data **per_cpu_call_rcu_data = NULL;
+/*
+ * per_cpu_call_rcu_data should be constant, but some functions below, used both
+ * for cases where cpu number is available and not available, assume it it not
+ * constant.
+ */
+static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
 static const long maxcpus = -1;
 
 static void alloc_cpu_call_rcu_data(void)
@@ -195,6 +179,26 @@ int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 }
 #endif
 
+static void call_rcu_wait(struct call_rcu_data *crdp)
+{
+	/* Read call_rcu list before read futex */
+	cmm_smp_mb();
+	if (uatomic_read(&crdp->futex) == -1)
+		futex_async(&crdp->futex, FUTEX_WAIT, -1,
+			    NULL, NULL, 0);
+}
+
+static void call_rcu_wake_up(struct call_rcu_data *crdp)
+{
+	/* Write to call_rcu list before reading/writing futex */
+	cmm_smp_mb();
+	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+		uatomic_set(&crdp->futex, 0);
+		futex_async(&crdp->futex, FUTEX_WAKE, 1,
+			    NULL, NULL, 0);
+	}
+}
+
 /* This is the code run by each call_rcu thread. */
 
 static void *call_rcu_thread(void *arg)
@@ -204,19 +208,25 @@
 	struct cds_wfq_node **cbs_tail;
 	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
 	struct rcu_head *rhp;
+	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
 
 	if (set_thread_cpu_affinity(crdp) != 0) {
 		perror("pthread_setaffinity_np");
 		exit(-1);
 	}
+
+	/*
+	 * If callbacks take a read-side lock, we need to be registered.
+	 */
+	rcu_register_thread();
+
 	thread_call_rcu_data = crdp;
+	if (!rt) {
+		uatomic_dec(&crdp->futex);
+		/* Decrement futex before reading call_rcu list */
+		cmm_smp_mb();
+	}
 	for (;;) {
-		if (!(crdp->flags & URCU_CALL_RCU_RT)) {
-			uatomic_dec(&crdp->futex);
-			/* Decrement futex before reading call_rcu list */
-			cmm_smp_mb();
-		}
 		if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
 			while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
 				poll(NULL, 0, 1);
@@ -240,25 +250,37 @@
 			} while (cbs != NULL);
 			uatomic_sub(&crdp->qlen, cbcount);
 		}
-		if (crdp->flags & URCU_CALL_RCU_STOP) {
-			if (!(crdp->flags & URCU_CALL_RCU_RT)) {
+		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
+			break;
+		rcu_thread_offline();
+		if (!rt) {
+			if (&crdp->cbs.head
+			    == _CMM_LOAD_SHARED(crdp->cbs.tail)) {
+				call_rcu_wait(crdp);
+				poll(NULL, 0, 10);
+				uatomic_dec(&crdp->futex);
 				/*
-				 * Read call_rcu list before write futex.
+				 * Decrement futex before reading
+				 * call_rcu list.
				 */
 				cmm_smp_mb();
-				uatomic_set(&crdp->futex, 0);
+			} else {
+				poll(NULL, 0, 10);
 			}
-			break;
+		} else {
+			poll(NULL, 0, 10);
 		}
-		if (!(crdp->flags & URCU_CALL_RCU_RT)) {
-			if (&crdp->cbs.head == _CMM_LOAD_SHARED(crdp->cbs.tail))
-				call_rcu_wait(crdp);
-		}
-		poll(NULL, 0, 10);
+		rcu_thread_online();
 	}
-	call_rcu_lock(&crdp->mtx);
-	crdp->flags |= URCU_CALL_RCU_STOPPED;
-	call_rcu_unlock(&crdp->mtx);
+	if (!rt) {
+		/*
+		 * Read call_rcu list before write futex.
+		 */
+		cmm_smp_mb();
+		uatomic_set(&crdp->futex, 0);
+	}
+	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
+	rcu_unregister_thread();
 	return NULL;
 }
 
@@ -282,10 +304,6 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 	memset(crdp, '\0', sizeof(*crdp));
 	cds_wfq_init(&crdp->cbs);
 	crdp->qlen = 0;
-	if (pthread_mutex_init(&crdp->mtx, NULL) != 0) {
-		perror("pthread_mutex_init");
-		exit(-1);
-	}
 	crdp->futex = 0;
 	crdp->flags = flags;
 	cds_list_add(&crdp->list, &call_rcu_data_list);
@@ -365,9 +383,10 @@ struct call_rcu_data *create_call_rcu_data(unsigned long flags,
 
 int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
 {
-	int warned = 0;
+	static int warned = 0;
 
 	call_rcu_lock(&call_rcu_mutex);
+	alloc_cpu_call_rcu_data();
 	if (cpu < 0 || maxcpus <= cpu) {
 		if (!warned) {
 			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
@@ -377,13 +396,21 @@ int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
 		errno = EINVAL;
 		return -EINVAL;
 	}
-	alloc_cpu_call_rcu_data();
-	call_rcu_unlock(&call_rcu_mutex);
+
 	if (per_cpu_call_rcu_data == NULL) {
+		call_rcu_unlock(&call_rcu_mutex);
 		errno = ENOMEM;
 		return -ENOMEM;
 	}
+
+	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
+		call_rcu_unlock(&call_rcu_mutex);
+		errno = EEXIST;
+		return -EEXIST;
+	}
+
 	per_cpu_call_rcu_data[cpu] = crdp;
+	call_rcu_unlock(&call_rcu_mutex);
 	return 0;
 }
 
@@ -417,22 +444,17 @@ struct call_rcu_data *get_default_call_rcu_data(void)
  */
 struct call_rcu_data *get_call_rcu_data(void)
 {
-	int curcpu;
-	static int warned = 0;
+	struct call_rcu_data *crd;
 
 	if (thread_call_rcu_data != NULL)
 		return thread_call_rcu_data;
-	if (maxcpus <= 0)
-		return get_default_call_rcu_data();
-	curcpu = sched_getcpu();
-	if (!warned && (curcpu < 0 || maxcpus <= curcpu)) {
-		fprintf(stderr, "[error] liburcu: gcrd CPU # out of range\n");
-		warned = 1;
+
+	if (maxcpus > 0) {
+		crd = get_cpu_call_rcu_data(sched_getcpu());
+		if (crd)
+			return crd;
 	}
-	if (curcpu >= 0 && maxcpus > curcpu &&
-	    per_cpu_call_rcu_data != NULL &&
-	    per_cpu_call_rcu_data[curcpu] != NULL)
-		return per_cpu_call_rcu_data[curcpu];
+
 	return get_default_call_rcu_data();
 }
 
@@ -464,7 +486,9 @@ void set_thread_call_rcu_data(struct call_rcu_data *crdp)
 /*
  * Create a separate call_rcu thread for each CPU. This does not
  * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
- * function if you want that behavior.
+ * function if you want that behavior. Should be paired with
+ * free_all_cpu_call_rcu_data() to teardown these call_rcu worker
+ * threads.
  */
 
 int create_all_cpu_call_rcu_data(unsigned long flags)
@@ -568,12 +592,10 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 	if (crdp == NULL || crdp == default_call_rcu_data) {
 		return;
 	}
-	if ((crdp->flags & URCU_CALL_RCU_STOPPED) == 0) {
-		call_rcu_lock(&crdp->mtx);
-		crdp->flags |= URCU_CALL_RCU_STOP;
-		call_rcu_unlock(&crdp->mtx);
+	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
+		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
 		wake_call_rcu_thread(crdp);
-		while ((crdp->flags & URCU_CALL_RCU_STOPPED) == 0)
+		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
 			poll(NULL, 0, 1);
 	}
 	if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
@@ -587,9 +609,11 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 		*cbs_endprev = cbs;
 		uatomic_add(&default_call_rcu_data->qlen,
 			    uatomic_read(&crdp->qlen));
-		cds_list_del(&crdp->list);
-		free(crdp);
+		wake_call_rcu_thread(default_call_rcu_data);
 	}
+
+	cds_list_del(&crdp->list);
+	free(crdp);
 }
 
 /*
@@ -638,7 +662,7 @@ void call_rcu_after_fork_parent(void)
  */
 void call_rcu_after_fork_child(void)
 {
-	struct call_rcu_data *crdp;
+	struct call_rcu_data *crdp, *next;
 
 	/* Release the mutex. */
 	call_rcu_unlock(&call_rcu_mutex);
@@ -651,13 +675,10 @@ void call_rcu_after_fork_child(void)
 	(void)get_default_call_rcu_data();
 
 	/* Dispose of all of the rest of the call_rcu_data structures. */
-	while (call_rcu_data_list.next != call_rcu_data_list.prev) {
-		crdp = cds_list_entry(call_rcu_data_list.prev,
-				      struct call_rcu_data, list);
+	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
 		if (crdp == default_call_rcu_data)
-			crdp = cds_list_entry(crdp->list.prev,
-					      struct call_rcu_data, list);
+			continue;
-		crdp->flags = URCU_CALL_RCU_STOPPED;
+		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
 		call_rcu_data_free(crdp);
 	}
 }
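
For orientation only, below is a minimal, hypothetical sketch of how an application drives the call_rcu() interface whose worker threads the patch above reworks. It is not part of the patch: struct foo, free_foo() and retire_foo() are invented names, and the header layout (<urcu-call-rcu.h>, <urcu/compiler.h>) assumes the liburcu release of this era.

/*
 * Hypothetical usage sketch (not from the patch): defer-freeing an
 * RCU-protected element through a call_rcu worker thread.
 */
#include <stdlib.h>
#include <urcu-call-rcu.h>	/* call_rcu(), struct rcu_head */
#include <urcu/compiler.h>	/* caa_container_of() */

struct foo {
	int value;
	struct rcu_head rcu;	/* embedded head handed to call_rcu() */
};

static void free_foo(struct rcu_head *head)
{
	struct foo *p = caa_container_of(head, struct foo, rcu);

	/* Runs in a call_rcu worker thread after a grace period. */
	free(p);
}

static void retire_foo(struct foo *p)
{
	/*
	 * call_rcu() picks the caller's thread-local call_rcu instance if
	 * one was set, else the current CPU's worker (e.g. after
	 * create_all_cpu_call_rcu_data(0)), else the default worker, as
	 * implemented by get_call_rcu_data() in the patch above.
	 */
	call_rcu(&p->rcu, free_foo);
}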