X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-call-rcu-impl.h;h=d09adb1bc431a55923d3bbeb3904939f3344e460;hp=b9e57cd60a2ba731a2030cd36c9b9b6c211c416e;hb=1f689e13ea7e519b1afc001e9c55a7b1b60b599f;hpb=bc2433a9853a945a825c244e9ccfe341b73b2ceb

diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index b9e57cd..d09adb1 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -82,12 +82,20 @@ static struct call_rcu_data *default_call_rcu_data;
 
 /*
  * Pointer to array of pointers to per-CPU call_rcu_data structures
- * and # CPUs.
+ * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
+ * array of RCU-protected pointers to call_rcu_data. call_rcu acts as an
+ * RCU reader: it loads per_cpu_call_rcu_data and the per-CPU pointer
+ * without holding the mutex. The call_rcu_mutex protects updates.
  */
 
 static struct call_rcu_data **per_cpu_call_rcu_data;
 static long maxcpus;
 
+static void maxcpus_reset(void)
+{
+        maxcpus = 0;
+}
+
 /* Allocate the array if it has not already been allocated. */
 
 static void alloc_cpu_call_rcu_data(void)
@@ -104,7 +112,7 @@ static void alloc_cpu_call_rcu_data(void)
         p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
         if (p != NULL) {
                 memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
-                per_cpu_call_rcu_data = p;
+                rcu_set_pointer(&per_cpu_call_rcu_data, p);
         } else {
                 if (!warned) {
                         fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
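Note on the two hunks above: they establish the update protocol the rest of the
patch relies on. per_cpu_call_rcu_data is published with rcu_set_pointer() while
call_rcu_mutex is held, and readers load it with rcu_dereference() inside an RCU
read-side critical section. A minimal sketch of that pattern, for illustration
only and not part of the patch; my_table, my_table_read() and my_table_publish()
are made-up names, and reader threads are assumed to have called
rcu_register_thread():

/*
 * Illustration only, not part of the patch: the publish/read protocol
 * used above for per_cpu_call_rcu_data.
 */
#include <pthread.h>
#include <urcu.h>

static pthread_mutex_t my_table_mutex = PTHREAD_MUTEX_INITIALIZER;
static int *my_table;   /* RCU-protected pointer, like per_cpu_call_rcu_data */

static int my_table_read(int idx)
{
        int *t, v = -1;

        rcu_read_lock();
        t = rcu_dereference(my_table); /* no mutex on the read side */
        if (t)
                v = t[idx];
        rcu_read_unlock();
        return v;
}

static void my_table_publish(int *newp)
{
        pthread_mutex_lock(&my_table_mutex);   /* updates are serialized */
        rcu_set_pointer(&my_table, newp);
        pthread_mutex_unlock(&my_table_mutex);
        /* The old array, if any, may only be freed after a grace period. */
}

Reclaiming the replaced array after a grace period is exactly what call_rcu()
and synchronize_rcu() are used for in the hunks that follow.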
@@ -123,6 +131,10 @@ static void alloc_cpu_call_rcu_data(void)
 static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
 static const long maxcpus = -1;
 
+static void maxcpus_reset(void)
+{
+}
+
 static void alloc_cpu_call_rcu_data(void)
 {
 }
@@ -215,6 +227,11 @@ static void *call_rcu_thread(void *arg)
                 exit(-1);
         }
 
+        /*
+         * If callbacks take a read-side lock, we need to be registered.
+         */
+        rcu_register_thread();
+
         thread_call_rcu_data = crdp;
         if (!rt) {
                 uatomic_dec(&crdp->futex);
@@ -247,6 +264,7 @@ static void *call_rcu_thread(void *arg)
                 }
                 if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
                         break;
+                rcu_thread_offline();
                 if (!rt) {
                         if (&crdp->cbs.head
                             == _CMM_LOAD_SHARED(crdp->cbs.tail)) {
@@ -264,6 +282,7 @@ static void *call_rcu_thread(void *arg)
                 } else {
                         poll(NULL, 0, 10);
                 }
+                rcu_thread_online();
         }
         if (!rt) {
                 /*
@@ -273,6 +292,7 @@ static void *call_rcu_thread(void *arg)
                 uatomic_set(&crdp->futex, 0);
         }
         uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
+        rcu_unregister_thread();
         return NULL;
 }
 
@@ -313,13 +333,18 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
  * CPU, returning NULL if there is none. We cannot automatically
  * create it because the platform we are running on might not define
  * sched_getcpu().
+ *
+ * The call to this function and use of the returned call_rcu_data
+ * should be protected by an RCU read-side lock.
  */
 
 struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
 {
         static int warned = 0;
+        struct call_rcu_data **pcpu_crdp;
 
-        if (per_cpu_call_rcu_data == NULL)
+        pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
+        if (pcpu_crdp == NULL)
                 return NULL;
         if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
                 fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
@@ -327,7 +352,7 @@ struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
         }
         if (cpu < 0 || maxcpus <= cpu)
                 return NULL;
-        return per_cpu_call_rcu_data[cpu];
+        return rcu_dereference(pcpu_crdp[cpu]);
 }
 
 /*
@@ -375,9 +400,10 @@ struct call_rcu_data *create_call_rcu_data(unsigned long flags,
 
 int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
 {
-        int warned = 0;
+        static int warned = 0;
 
         call_rcu_lock(&call_rcu_mutex);
+        alloc_cpu_call_rcu_data();
         if (cpu < 0 || maxcpus <= cpu) {
                 if (!warned) {
                         fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
@@ -387,13 +413,21 @@ int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
                 errno = EINVAL;
                 return -EINVAL;
         }
-        alloc_cpu_call_rcu_data();
-        call_rcu_unlock(&call_rcu_mutex);
-        if (per_cpu_call_rcu_data == NULL) {
+
+        if (per_cpu_call_rcu_data == NULL) {
+                call_rcu_unlock(&call_rcu_mutex);
                 errno = ENOMEM;
                 return -ENOMEM;
         }
-        per_cpu_call_rcu_data[cpu] = crdp;
+
+        if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
+                call_rcu_unlock(&call_rcu_mutex);
+                errno = EEXIST;
+                return -EEXIST;
+        }
+
+        rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
+        call_rcu_unlock(&call_rcu_mutex);
         return 0;
 }
 
@@ -424,25 +458,23 @@
  * structure assigned to the CPU on which the thread is running,
  * followed by the default call_rcu_data structure. If there is not
  * yet a default call_rcu_data structure, one will be created.
+ *
+ * Calls to this function and use of the returned call_rcu_data should
+ * be protected by an RCU read-side lock.
  */
 struct call_rcu_data *get_call_rcu_data(void)
 {
-        int curcpu;
-        static int warned = 0;
+        struct call_rcu_data *crd;
 
         if (thread_call_rcu_data != NULL)
                 return thread_call_rcu_data;
-        if (maxcpus <= 0)
-                return get_default_call_rcu_data();
-        curcpu = sched_getcpu();
-        if (!warned && (curcpu < 0 || maxcpus <= curcpu)) {
-                fprintf(stderr, "[error] liburcu: gcrd CPU # out of range\n");
-                warned = 1;
+
+        if (maxcpus > 0) {
+                crd = get_cpu_call_rcu_data(sched_getcpu());
+                if (crd)
+                        return crd;
         }
-        if (curcpu >= 0 && maxcpus > curcpu &&
-            per_cpu_call_rcu_data != NULL &&
-            per_cpu_call_rcu_data[curcpu] != NULL)
-                return per_cpu_call_rcu_data[curcpu];
+
         return get_default_call_rcu_data();
 }
 
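The rewritten get_call_rcu_data() above now defers to get_cpu_call_rcu_data(),
and both are documented as RCU read-side operations: the returned call_rcu_data
may be freed after a grace period once free_all_cpu_call_rcu_data() unpublishes
it, so the lookup and every use of the result must sit inside one read-side
critical section. A caller-side sketch, for illustration only and not part of
the patch; using_per_cpu_queue() is a hypothetical helper, and it assumes the
platform defines sched_getcpu() and that the calling thread is registered:

/*
 * Illustration only, not part of the patch.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <urcu.h>
#include <urcu-call-rcu.h>

static int using_per_cpu_queue(void)
{
        struct call_rcu_data *crdp;
        int ret;

        rcu_read_lock();
        crdp = get_cpu_call_rcu_data(sched_getcpu());
        ret = (crdp != NULL);  /* use crdp only inside this section */
        rcu_read_unlock();
        /* crdp must not be dereferenced past rcu_read_unlock(). */
        return ret;
}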
@@ -474,7 +506,9 @@ void set_thread_call_rcu_data(struct call_rcu_data *crdp)
 /*
  * Create a separate call_rcu thread for each CPU. This does not
  * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
- * function if you want that behavior.
+ * function if you want that behavior. Should be paired with
+ * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
+ * threads.
  */
 
 int create_all_cpu_call_rcu_data(unsigned long flags)
@@ -508,8 +542,13 @@ int create_all_cpu_call_rcu_data(unsigned long flags)
                 }
                 call_rcu_unlock(&call_rcu_mutex);
                 if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
-                        /* FIXME: Leaks crdp for now. */
-                        return ret; /* Can happen on race. */
+                        call_rcu_data_free(crdp);
+
+                        /* it has been created by another thread */
+                        if (ret == -EEXIST)
+                                continue;
+
+                        return ret;
                 }
         }
         return 0;
@@ -536,6 +575,8 @@ static void wake_call_rcu_thread(struct call_rcu_data *crdp)
  * need the first invocation of call_rcu() to be fast, make sure
  * to create a call_rcu thread first. One way to accomplish this is
  * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
+ *
+ * call_rcu must be called by registered RCU read-side threads.
  */
 
 void call_rcu(struct rcu_head *head,
@@ -545,10 +586,13 @@
 
         cds_wfq_node_init(&head->next);
         head->func = func;
+        /* Hold the RCU read-side lock across the use of the per-CPU crdp. */
+        rcu_read_lock();
         crdp = get_call_rcu_data();
         cds_wfq_enqueue(&crdp->cbs, &head->next);
         uatomic_inc(&crdp->qlen);
         wake_call_rcu_thread(crdp);
+        rcu_read_unlock();
 }
 
 /*
@@ -590,14 +634,21 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
                 _CMM_STORE_SHARED(crdp->cbs.head, NULL);
                 cbs_tail = (struct cds_wfq_node **)
                         uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
+                /* Create default call_rcu data if need be */
+                (void) get_default_call_rcu_data();
                 cbs_endprev = (struct cds_wfq_node **)
                         uatomic_xchg(&default_call_rcu_data->cbs.tail, cbs_tail);
                 *cbs_endprev = cbs;
                 uatomic_add(&default_call_rcu_data->qlen,
                             uatomic_read(&crdp->qlen));
-                cds_list_del(&crdp->list);
-                free(crdp);
+                wake_call_rcu_thread(default_call_rcu_data);
         }
+
+        call_rcu_lock(&call_rcu_mutex);
+        cds_list_del(&crdp->list);
+        call_rcu_unlock(&call_rcu_mutex);
+
+        free(crdp);
 }
 
 /*
@@ -606,17 +657,38 @@
 void free_all_cpu_call_rcu_data(void)
 {
         int cpu;
-        struct call_rcu_data *crdp;
+        struct call_rcu_data **crdp;
+        static int warned = 0;
 
         if (maxcpus <= 0)
                 return;
+
+        crdp = malloc(sizeof(*crdp) * maxcpus);
+        if (!crdp) {
+                if (!warned) {
+                        fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
+                }
+                warned = 1;
+                return;
+        }
+
         for (cpu = 0; cpu < maxcpus; cpu++) {
-                crdp = get_cpu_call_rcu_data(cpu);
-                if (crdp == NULL)
+                crdp[cpu] = get_cpu_call_rcu_data(cpu);
+                if (crdp[cpu] == NULL)
                         continue;
                 set_cpu_call_rcu_data(cpu, NULL);
-                call_rcu_data_free(crdp);
         }
+        /*
+         * Wait for call_rcu sites acting as RCU readers of the
+         * call_rcu_data to become quiescent.
+         */
+        synchronize_rcu();
+        for (cpu = 0; cpu < maxcpus; cpu++) {
+                if (crdp[cpu] == NULL)
+                        continue;
+                call_rcu_data_free(crdp[cpu]);
+        }
+        free(crdp);
 }
 
 /*
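free_all_cpu_call_rcu_data() above is the teardown half of
create_all_cpu_call_rcu_data(): it unpublishes every per-CPU pointer, waits a
grace period with synchronize_rcu() so that concurrent call_rcu() invocations
(the RCU readers of the call_rcu_data) drain, and only then frees the
structures. A sketch of the intended application-side pairing, for illustration
only and not part of the patch; struct my_node and my_node_free() are made-up
names:

/*
 * Illustration only, not part of the patch.
 */
#include <stdlib.h>
#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/compiler.h>      /* caa_container_of() */

struct my_node {
        int value;
        struct rcu_head rcu;    /* embedded head for call_rcu() */
};

static void my_node_free(struct rcu_head *head)
{
        free(caa_container_of(head, struct my_node, rcu));
}

int main(void)
{
        struct my_node *node;

        rcu_register_thread();  /* call_rcu() callers must be registered */
        (void) create_all_cpu_call_rcu_data(0); /* per-CPU worker threads */

        node = calloc(1, sizeof(*node));
        /* ... publish node, then retract it from readers ... */
        if (node)
                call_rcu(&node->rcu, my_node_free); /* deferred reclaim */

        free_all_cpu_call_rcu_data();   /* tear down the per-CPU workers */
        rcu_unregister_thread();
        return 0;
}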
@@ -646,11 +718,15 @@ void call_rcu_after_fork_parent(void)
  */
 void call_rcu_after_fork_child(void)
 {
-        struct call_rcu_data *crdp;
+        struct call_rcu_data *crdp, *next;
 
         /* Release the mutex. */
         call_rcu_unlock(&call_rcu_mutex);
 
+        /* Do nothing when call_rcu() has not been used */
+        if (cds_list_empty(&call_rcu_data_list))
+                return;
+
         /*
          * Allocate a new default call_rcu_data structure in order
          * to get a working call_rcu thread to go with it.
@@ -657,14 +733,19 @@ void call_rcu_after_fork_child(void)
          */
         default_call_rcu_data = NULL;
         (void)get_default_call_rcu_data();
 
+        /* Clean up call_rcu_data pointers before use */
+        maxcpus_reset();
+        free(per_cpu_call_rcu_data);
+        rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
+        thread_call_rcu_data = NULL;
+
         /* Dispose of all of the rest of the call_rcu_data structures. */
-        while (call_rcu_data_list.next != call_rcu_data_list.prev) {
-                crdp = cds_list_entry(call_rcu_data_list.prev,
-                                      struct call_rcu_data, list);
+        cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
                 if (crdp == default_call_rcu_data)
-                        crdp = cds_list_entry(crdp->list.prev,
-                                              struct call_rcu_data, list);
+                        continue;
                 uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
                 call_rcu_data_free(crdp);
         }
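The hunks above, together with maxcpus_reset(), reset the per-CPU call_rcu
state in the child after fork(). The fork handlers themselves are driven by the
application; a typical wiring, for illustration only and not part of the patch
(my_liburcu_init() is a made-up name), registers them once with
pthread_atfork():

/*
 * Illustration only, not part of the patch: hooking the call_rcu fork
 * handlers fixed above into an application.
 */
#include <pthread.h>
#include <urcu.h>
#include <urcu-call-rcu.h>

static void my_liburcu_init(void)
{
        rcu_register_thread();
        (void) pthread_atfork(call_rcu_before_fork,
                              call_rcu_after_fork_parent,
                              call_rcu_after_fork_child);
}

With that in place, a child created by fork() without exec() gets a fresh
default call_rcu worker thread and discards the parent's per-CPU workers,
which do not survive the fork.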