X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-call-rcu-impl.h;h=8ed2ab384f81c0806d7c40a74430eb527ad825de;hp=0a47d969a4e8d3f50438fc69bbede379e6715c50;hb=450b97095e27646fcd1e4b83c99477d7253b987b;hpb=60af049d5e1d17e7ffdfd139bb486bd969c6a76c diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h index 0a47d96..8ed2ab3 100644 --- a/urcu-call-rcu-impl.h +++ b/urcu-call-rcu-impl.h @@ -40,6 +40,7 @@ #include "urcu-pointer.h" #include "urcu/list.h" #include "urcu/futex.h" +#include "urcu/tls-compat.h" /* Data structure that identifies a call_rcu thread. */ @@ -62,7 +63,7 @@ CDS_LIST_HEAD(call_rcu_data_list); /* Link a thread using call_rcu() to its call_rcu thread. */ -static __thread struct call_rcu_data *thread_call_rcu_data; +static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data); /* Guard call_rcu thread creation. */ @@ -82,7 +83,10 @@ static struct call_rcu_data *default_call_rcu_data; /* * Pointer to array of pointers to per-CPU call_rcu_data structures - * and # CPUs. + * and # CPUs. per_cpu_call_rcu_data is a RCU-protected pointer to an + * array of RCU-protected pointers to call_rcu_data. call_rcu acts as a + * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer + * without mutex. The call_rcu_mutex protects updates. */ static struct call_rcu_data **per_cpu_call_rcu_data; @@ -109,7 +113,7 @@ static void alloc_cpu_call_rcu_data(void) p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data)); if (p != NULL) { memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data)); - per_cpu_call_rcu_data = p; + rcu_set_pointer(&per_cpu_call_rcu_data, p); } else { if (!warned) { fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n"); @@ -201,7 +205,7 @@ static void call_rcu_wake_up(struct call_rcu_data *crdp) { /* Write to call_rcu list before reading/writing futex */ cmm_smp_mb(); - if (unlikely(uatomic_read(&crdp->futex) == -1)) { + if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) { uatomic_set(&crdp->futex, 0); futex_async(&crdp->futex, FUTEX_WAKE, 1, NULL, NULL, 0); @@ -229,7 +233,7 @@ static void *call_rcu_thread(void *arg) */ rcu_register_thread(); - thread_call_rcu_data = crdp; + URCU_TLS(thread_call_rcu_data) = crdp; if (!rt) { uatomic_dec(&crdp->futex); /* Decrement futex before reading call_rcu list */ @@ -330,13 +334,18 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp, * CPU, returning NULL if there is none. We cannot automatically * created it because the platform we are running on might not define * sched_getcpu(). + * + * The call to this function and use of the returned call_rcu_data + * should be protected by RCU read-side lock. */ struct call_rcu_data *get_cpu_call_rcu_data(int cpu) { static int warned = 0; + struct call_rcu_data **pcpu_crdp; - if (per_cpu_call_rcu_data == NULL) + pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data); + if (pcpu_crdp == NULL) return NULL; if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) { fprintf(stderr, "[error] liburcu: get CPU # out of range\n"); @@ -344,7 +353,7 @@ struct call_rcu_data *get_cpu_call_rcu_data(int cpu) } if (cpu < 0 || maxcpus <= cpu) return NULL; - return per_cpu_call_rcu_data[cpu]; + return rcu_dereference(pcpu_crdp[cpu]); } /* @@ -388,6 +397,10 @@ struct call_rcu_data *create_call_rcu_data(unsigned long flags, * the caller's responsibility to dispose of the removed structure. * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure * (prior to NULLing it out, of course). 
+ * + * The caller must wait for a grace-period to pass between return from + * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the + * previous call rcu data as argument. */ int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp) @@ -418,7 +431,7 @@ int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp) return -EEXIST; } - per_cpu_call_rcu_data[cpu] = crdp; + rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp); call_rcu_unlock(&call_rcu_mutex); return 0; } @@ -450,13 +463,16 @@ struct call_rcu_data *get_default_call_rcu_data(void) * structure assigned to the CPU on which the thread is running, * followed by the default call_rcu_data structure. If there is not * yet a default call_rcu_data structure, one will be created. + * + * Calls to this function and use of the returned call_rcu_data should + * be protected by RCU read-side lock. */ struct call_rcu_data *get_call_rcu_data(void) { struct call_rcu_data *crd; - if (thread_call_rcu_data != NULL) - return thread_call_rcu_data; + if (URCU_TLS(thread_call_rcu_data) != NULL) + return URCU_TLS(thread_call_rcu_data); if (maxcpus > 0) { crd = get_cpu_call_rcu_data(sched_getcpu()); @@ -473,7 +489,7 @@ struct call_rcu_data *get_call_rcu_data(void) struct call_rcu_data *get_thread_call_rcu_data(void) { - return thread_call_rcu_data; + return URCU_TLS(thread_call_rcu_data); } /* @@ -489,7 +505,7 @@ struct call_rcu_data *get_thread_call_rcu_data(void) void set_thread_call_rcu_data(struct call_rcu_data *crdp) { - thread_call_rcu_data = crdp; + URCU_TLS(thread_call_rcu_data) = crdp; } /* @@ -564,6 +580,8 @@ static void wake_call_rcu_thread(struct call_rcu_data *crdp) * need the first invocation of call_rcu() to be fast, make sure * to create a call_rcu thread first. One way to accomplish this is * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data(). + * + * call_rcu must be called by registered RCU read-side threads. */ void call_rcu(struct rcu_head *head, @@ -573,10 +591,13 @@ void call_rcu(struct rcu_head *head, cds_wfq_node_init(&head->next); head->func = func; + /* Holding rcu read-side lock across use of per-cpu crdp */ + rcu_read_lock(); crdp = get_call_rcu_data(); cds_wfq_enqueue(&crdp->cbs, &head->next); uatomic_inc(&crdp->qlen); wake_call_rcu_thread(crdp); + rcu_read_unlock(); } /* @@ -596,6 +617,10 @@ void call_rcu(struct rcu_head *head, * * We also silently refuse to free NULL pointers. This simplifies * the calling code. + * + * The caller must wait for a grace-period to pass between return from + * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the + * previous call rcu data as argument. */ void call_rcu_data_free(struct call_rcu_data *crdp) { @@ -641,17 +666,38 @@ void call_rcu_data_free(struct call_rcu_data *crdp) void free_all_cpu_call_rcu_data(void) { int cpu; - struct call_rcu_data *crdp; + struct call_rcu_data **crdp; + static int warned = 0; if (maxcpus <= 0) return; + + crdp = malloc(sizeof(*crdp) * maxcpus); + if (!crdp) { + if (!warned) { + fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n"); + } + warned = 1; + return; + } + for (cpu = 0; cpu < maxcpus; cpu++) { - crdp = get_cpu_call_rcu_data(cpu); - if (crdp == NULL) + crdp[cpu] = get_cpu_call_rcu_data(cpu); + if (crdp[cpu] == NULL) continue; set_cpu_call_rcu_data(cpu, NULL); - call_rcu_data_free(crdp); } + /* + * Wait for call_rcu sites acting as RCU readers of the + * call_rcu_data to become quiescent. 
+ */ + synchronize_rcu(); + for (cpu = 0; cpu < maxcpus; cpu++) { + if (crdp[cpu] == NULL) + continue; + call_rcu_data_free(crdp[cpu]); + } + free(crdp); } /* @@ -700,8 +746,8 @@ void call_rcu_after_fork_child(void) /* Cleanup call_rcu_data pointers before use */ maxcpus_reset(); free(per_cpu_call_rcu_data); - per_cpu_call_rcu_data = NULL; - thread_call_rcu_data = NULL; + rcu_set_pointer(&per_cpu_call_rcu_data, NULL); + URCU_TLS(thread_call_rcu_data) = NULL; /* Dispose of all of the rest of the call_rcu_data structures. */ cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
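
The comments introduced by this patch amount to a lifetime rule for the per-CPU call_rcu_data: call_rcu() now reads per_cpu_call_rcu_data and the per-CPU slot only under rcu_read_lock(), so whoever detaches a structure with set_cpu_call_rcu_data() must let a grace period elapse before handing it to call_rcu_data_free(). Below is a minimal caller-side sketch of that pattern, not part of the patch. It assumes <urcu.h> exposes the call_rcu declarations for the default flavor; my_node, free_node_cb(), retire_node() and replace_cpu_worker() are hypothetical names invented for the illustration, while create_call_rcu_data(), set_cpu_call_rcu_data(), get_cpu_call_rcu_data(), call_rcu_data_free(), call_rcu() and synchronize_rcu() are the entry points defined in the file above.

#include <stdlib.h>

#include <urcu.h>		/* default flavor: call_rcu(), synchronize_rcu() */
#include <urcu/compiler.h>	/* caa_container_of() */

struct my_node {			/* hypothetical payload type */
	int value;
	struct rcu_head rcu_head;
};

static void free_node_cb(struct rcu_head *head)
{
	free(caa_container_of(head, struct my_node, rcu_head));
}

/*
 * Deferred free of a node.  Per the comment added to call_rcu(), the
 * calling thread must be a registered RCU read-side thread
 * (rcu_register_thread()).
 */
static void retire_node(struct my_node *node)
{
	call_rcu(&node->rcu_head, free_node_cb);
}

/*
 * Replace the call_rcu worker bound to "cpu" and retire the old one.
 * Assumes a single configuration thread, so no other updater touches
 * the per-CPU slot concurrently; the old structure therefore stays
 * valid here until we free it ourselves.  The grace period between
 * detaching the old call_rcu_data and freeing it is the rule the patch
 * documents: call_rcu() dereferences the per-CPU pointer inside an RCU
 * read-side critical section, so the old structure may still be in use
 * right after the swap.
 */
static int replace_cpu_worker(int cpu)
{
	struct call_rcu_data *old_crdp, *new_crdp;

	new_crdp = create_call_rcu_data(0, cpu);	/* no flags, affine to "cpu" */
	if (!new_crdp)
		return -1;

	old_crdp = get_cpu_call_rcu_data(cpu);		/* remember the old structure */
	set_cpu_call_rcu_data(cpu, NULL);		/* detach it ... */
	if (set_cpu_call_rcu_data(cpu, new_crdp)) {	/* ... then install the new one */
		set_cpu_call_rcu_data(cpu, old_crdp);	/* put the old worker back */
		call_rcu_data_free(new_crdp);		/* never published: no grace period needed */
		return -1;
	}

	if (old_crdp) {
		synchronize_rcu();		/* wait out concurrent call_rcu() readers */
		call_rcu_data_free(old_crdp);	/* now safe to tear down and free */
	}
	return 0;
}

The detach / synchronize_rcu() / call_rcu_data_free() ordering is the same one the patched free_all_cpu_call_rcu_data() uses: it guarantees that any call_rcu() which read the old pointer inside its read-side critical section has finished enqueueing onto it before the structure is handed back.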