Test fix: 0.6 branch does not have tls-compat.h
diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index 6cb08be5731299c0d3334face6296faf9a98d7e7..6ea7b9a05d74bdfb0c2760d426306d663cb0ade8 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -64,8 +64,9 @@ CDS_LIST_HEAD(call_rcu_data_list);
 
 static __thread struct call_rcu_data *thread_call_rcu_data;
 
-/* Guard call_rcu thread creation. */
-
+/*
+ * Guard call_rcu thread creation and atfork handlers.
+ */
 static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 /* If a given thread does not have its own call_rcu thread, this is default. */
@@ -82,12 +83,20 @@ static struct call_rcu_data *default_call_rcu_data;
 
 /*
  * Pointer to array of pointers to per-CPU call_rcu_data structures
- * and # CPUs.
+ * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to
+ * an array of RCU-protected pointers to call_rcu_data. call_rcu acts
+ * as an RCU reader: it reads per_cpu_call_rcu_data and the per-CPU
+ * pointer without taking the mutex; call_rcu_mutex protects updates.
  */
 
 static struct call_rcu_data **per_cpu_call_rcu_data;
 static long maxcpus;
 
+static void maxcpus_reset(void)
+{
+       maxcpus = 0;
+}
+
 /* Allocate the array if it has not already been allocated. */
 
 static void alloc_cpu_call_rcu_data(void)
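
[Editor's note] To make the update/read protocol described in the comment above concrete, here is a minimal sketch in the context of this file (call_rcu_mutex, per_cpu_call_rcu_data, and the call_rcu_lock()/call_rcu_unlock() helpers all appear in this patch); the function names are hypothetical, and the reader must be called under rcu_read_lock():

/* Updater side: publish a new array while holding call_rcu_mutex. */
static void publish_array_sketch(struct call_rcu_data **newp)
{
        call_rcu_lock(&call_rcu_mutex);
        rcu_set_pointer(&per_cpu_call_rcu_data, newp);
        call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Reader side: both dereferences must sit inside the caller's
 * RCU read-side critical section, as must any use of the result.
 */
static struct call_rcu_data *read_slot_sketch(int cpu)
{
        struct call_rcu_data **array;

        array = rcu_dereference(per_cpu_call_rcu_data);
        if (array == NULL)
                return NULL;
        return rcu_dereference(array[cpu]);
}
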
@@ -104,7 +113,7 @@ static void alloc_cpu_call_rcu_data(void)
        p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
        if (p != NULL) {
                memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
-               per_cpu_call_rcu_data = p;
+               rcu_set_pointer(&per_cpu_call_rcu_data, p);
        } else {
                if (!warned) {
                        fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
@@ -123,6 +132,10 @@ static void alloc_cpu_call_rcu_data(void)
 static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
 static const long maxcpus = -1;
 
+static void maxcpus_reset(void)
+{
+}
+
 static void alloc_cpu_call_rcu_data(void)
 {
 }
@@ -192,7 +205,7 @@ static void call_rcu_wake_up(struct call_rcu_data *crdp)
 {
        /* Write to call_rcu list before reading/writing futex */
        cmm_smp_mb();
-       if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+       if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
                uatomic_set(&crdp->futex, 0);
                futex_async(&crdp->futex, FUTEX_WAKE, 1,
                      NULL, NULL, 0);
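
[Editor's note] This wake-up pairs with a futex wait on the same word. The waiting side is not shown in this hunk, so the following is an illustrative sketch only; the real helper in this file may differ in detail:

static void call_rcu_wait_sketch(struct call_rcu_data *crdp)
{
        /*
         * The call_rcu thread published futex == -1 before re-checking
         * its callback list. FUTEX_WAIT blocks only while the word
         * still reads -1, so a concurrent call_rcu_wake_up() (which
         * sets the word to 0 and issues FUTEX_WAKE) cannot be missed.
         */
        futex_async(&crdp->futex, FUTEX_WAIT, -1, NULL, NULL, 0);
}
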
@@ -227,6 +240,21 @@ static void *call_rcu_thread(void *arg)
                cmm_smp_mb();
        }
        for (;;) {
+               if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
+                       /*
+                        * Pause requested. Become quiescent: remove
+                        * ourself from all global lists, and don't
+                        * process any callback. The callback lists may
+                        * still be non-empty though.
+                        */
+                       rcu_unregister_thread();
+                       cmm_smp_mb__before_uatomic_or();
+                       uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
+                       while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
+                               poll(NULL, 0, 1);
+                       rcu_register_thread();
+               }
+
                if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
                        while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
                                poll(NULL, 0, 1);
@@ -321,13 +349,18 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
  * CPU, returning NULL if there is none.  We cannot automatically
 * create it because the platform we are running on might not define
  * sched_getcpu().
+ *
+ * Calls to this function and use of the returned call_rcu_data
+ * should be protected by an RCU read-side lock.
  */
 
 struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
 {
        static int warned = 0;
+       struct call_rcu_data **pcpu_crdp;
 
-       if (per_cpu_call_rcu_data == NULL)
+       pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
+       if (pcpu_crdp == NULL)
                return NULL;
        if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
                fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
@@ -335,7 +368,7 @@ struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
        }
        if (cpu < 0 || maxcpus <= cpu)
                return NULL;
-       return per_cpu_call_rcu_data[cpu];
+       return rcu_dereference(pcpu_crdp[cpu]);
 }
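
[Editor's note] A usage sketch for the read-side requirement documented above; the enqueue body mirrors call_rcu() later in this patch, and the function name is hypothetical:

void enqueue_on_cpu_sketch(int cpu, struct rcu_head *head)
{
        struct call_rcu_data *crdp;

        /* Protect both the array lookup and the use of crdp. */
        rcu_read_lock();
        crdp = get_cpu_call_rcu_data(cpu);
        if (crdp != NULL) {
                cds_wfq_enqueue(&crdp->cbs, &head->next);
                uatomic_inc(&crdp->qlen);
        }
        rcu_read_unlock();
}
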
 
 /*
@@ -379,6 +412,10 @@ struct call_rcu_data *create_call_rcu_data(unsigned long flags,
  * the caller's responsibility to dispose of the removed structure.
  * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
  * (prior to NULLing it out, of course).
+ *
+ * The caller must wait for a grace period to pass between the return
+ * from set_cpu_call_rcu_data() and the call to call_rcu_data_free()
+ * that passes the previous call_rcu data as its argument.
  */
 
 int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
@@ -396,12 +433,21 @@ int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
                errno = EINVAL;
                return -EINVAL;
        }
-       call_rcu_unlock(&call_rcu_mutex);
+
        if (per_cpu_call_rcu_data == NULL) {
+               call_rcu_unlock(&call_rcu_mutex);
                errno = ENOMEM;
                return -ENOMEM;
        }
-       per_cpu_call_rcu_data[cpu] = crdp;
+
+       if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
+               call_rcu_unlock(&call_rcu_mutex);
+               errno = EEXIST;
+               return -EEXIST;
+       }
+
+       rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
+       call_rcu_unlock(&call_rcu_mutex);
        return 0;
 }
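
[Editor's note] The grace-period requirement documented above translates into sequences like this sketch (hypothetical function name; synchronize_rcu() is liburcu's wait for pre-existing readers), which is essentially what free_all_cpu_call_rcu_data() below does in bulk:

int detach_and_free_cpu_crdp_sketch(int cpu)
{
        struct call_rcu_data *crdp;
        int ret;

        rcu_read_lock();
        crdp = get_cpu_call_rcu_data(cpu);
        rcu_read_unlock();
        if (crdp == NULL)
                return 0;
        ret = set_cpu_call_rcu_data(cpu, NULL);
        if (ret)
                return ret;
        /* Wait for concurrent call_rcu() readers to stop seeing crdp. */
        synchronize_rcu();
        call_rcu_data_free(crdp);
        return 0;
}
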
 
@@ -432,6 +478,9 @@ struct call_rcu_data *get_default_call_rcu_data(void)
  * structure assigned to the CPU on which the thread is running,
  * followed by the default call_rcu_data structure.  If there is not
  * yet a default call_rcu_data structure, one will be created.
+ *
+ * Calls to this function and use of the returned call_rcu_data should
+ * be protected by an RCU read-side lock.
  */
 struct call_rcu_data *get_call_rcu_data(void)
 {
@@ -513,8 +562,13 @@ int create_all_cpu_call_rcu_data(unsigned long flags)
                }
                call_rcu_unlock(&call_rcu_mutex);
                if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
-                       /* FIXME: Leaks crdp for now. */
-                       return ret; /* Can happen on race. */
+                       call_rcu_data_free(crdp);
+
+                       /* It has been created by another thread. */
+                       if (ret == -EEXIST)
+                               continue;
+
+                       return ret;
                }
        }
        return 0;
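
[Editor's note] A typical startup call, given the -EEXIST handling above (a race with another thread creating a per-CPU helper is now tolerated); the fallback message is illustrative:

        /* Sketch: create one call_rcu helper thread per CPU at startup. */
        if (create_all_cpu_call_rcu_data(0) != 0)
                fprintf(stderr, "[warn] falling back to the default call_rcu thread\n");
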
@@ -541,6 +595,8 @@ static void wake_call_rcu_thread(struct call_rcu_data *crdp)
  * need the first invocation of call_rcu() to be fast, make sure
  * to create a call_rcu thread first.  One way to accomplish this is
  * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
+ *
+ * call_rcu() must be called by threads registered as RCU readers.
  */
 
 void call_rcu(struct rcu_head *head,
@@ -550,10 +606,13 @@ void call_rcu(struct rcu_head *head,
 
        cds_wfq_node_init(&head->next);
        head->func = func;
+       /* Hold the RCU read-side lock across use of the per-CPU crdp. */
+       rcu_read_lock();
        crdp = get_call_rcu_data();
        cds_wfq_enqueue(&crdp->cbs, &head->next);
        uatomic_inc(&crdp->qlen);
        wake_call_rcu_thread(crdp);
+       rcu_read_unlock();
 }
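
[Editor's note] A minimal usage sketch for the requirement above: the caller is a registered RCU reader and embeds the rcu_head in the object being reclaimed. Struct and function names are illustrative, and the header names are assumptions about the liburcu of this era:

#include <stdlib.h>
#include <urcu.h>              /* call_rcu(), rcu_register_thread() */
#include <urcu/compiler.h>     /* caa_container_of() */

struct my_node {
        int value;
        struct rcu_head rcu_head;
};

/* Invoked by the call_rcu thread after a grace period has elapsed. */
static void free_my_node(struct rcu_head *head)
{
        free(caa_container_of(head, struct my_node, rcu_head));
}

/* From a thread that has already called rcu_register_thread(). */
static void retire_node(struct my_node *node)
{
        call_rcu(&node->rcu_head, free_my_node);
}
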
 
 /*
@@ -573,6 +632,10 @@ void call_rcu(struct rcu_head *head,
  *
  * We also silently refuse to free NULL pointers.  This simplifies
  * the calling code.
+ *
+ * The caller must wait for a grace period to pass between the return
+ * from set_cpu_call_rcu_data() and the call to call_rcu_data_free()
+ * that passes the previous call_rcu data as its argument.
  */
 void call_rcu_data_free(struct call_rcu_data *crdp)
 {
@@ -595,14 +658,22 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
                _CMM_STORE_SHARED(crdp->cbs.head, NULL);
                cbs_tail = (struct cds_wfq_node **)
                        uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
+               /* Create the default call_rcu data if need be. */
+               (void) get_default_call_rcu_data();
                cbs_endprev = (struct cds_wfq_node **)
-                       uatomic_xchg(&default_call_rcu_data, cbs_tail);
-               *cbs_endprev = cbs;
+                       uatomic_xchg(&default_call_rcu_data->cbs.tail,
+                                       cbs_tail);
+               _CMM_STORE_SHARED(*cbs_endprev, cbs);
                uatomic_add(&default_call_rcu_data->qlen,
                            uatomic_read(&crdp->qlen));
-               cds_list_del(&crdp->list);
-               free(crdp);
+               wake_call_rcu_thread(default_call_rcu_data);
        }
+
+       call_rcu_lock(&call_rcu_mutex);
+       cds_list_del(&crdp->list);
+       call_rcu_unlock(&call_rcu_mutex);
+
+       free(crdp);
 }
 
 /*
@@ -611,27 +682,61 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 void free_all_cpu_call_rcu_data(void)
 {
        int cpu;
-       struct call_rcu_data *crdp;
+       struct call_rcu_data **crdp;
+       static int warned = 0;
 
        if (maxcpus <= 0)
                return;
+
+       crdp = malloc(sizeof(*crdp) * maxcpus);
+       if (!crdp) {
+               if (!warned) {
+                       fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
+               }
+               warned = 1;
+               return;
+       }
+
        for (cpu = 0; cpu < maxcpus; cpu++) {
-               crdp = get_cpu_call_rcu_data(cpu);
-               if (crdp == NULL)
+               crdp[cpu] = get_cpu_call_rcu_data(cpu);
+               if (crdp[cpu] == NULL)
                        continue;
                set_cpu_call_rcu_data(cpu, NULL);
-               call_rcu_data_free(crdp);
        }
+       /*
+        * Wait for call_rcu sites acting as RCU readers of the
+        * call_rcu_data to become quiescent.
+        */
+       synchronize_rcu();
+       for (cpu = 0; cpu < maxcpus; cpu++) {
+               if (crdp[cpu] == NULL)
+                       continue;
+               call_rcu_data_free(crdp[cpu]);
+       }
+       free(crdp);
 }
 
 /*
  * Acquire the call_rcu_mutex in order to ensure that the child sees
- * all of the call_rcu() data structures in a consistent state.
+ * all of the call_rcu() data structures in a consistent state. Ensure
+ * that all call_rcu threads are in a quiescent state across fork.
  * Suitable for pthread_atfork() and friends.
  */
 void call_rcu_before_fork(void)
 {
+       struct call_rcu_data *crdp;
+
        call_rcu_lock(&call_rcu_mutex);
+
+       cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+               uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
+               cmm_smp_mb__after_uatomic_or();
+               wake_call_rcu_thread(crdp);
+       }
+       cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+               while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
+                       poll(NULL, 0, 1);
+       }
 }
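
[Editor's note] Per the "Suitable for pthread_atfork()" comment, an application typically registers these three handlers once; pthread_atfork() takes prepare/parent/child callbacks in that order:

#include <pthread.h>

/* Pause call_rcu threads across fork(), resume them afterwards. */
static void install_call_rcu_atfork_sketch(void)
{
        (void) pthread_atfork(call_rcu_before_fork,
                              call_rcu_after_fork_parent,
                              call_rcu_after_fork_child);
}
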
 
 /*
@@ -641,6 +746,10 @@ void call_rcu_before_fork(void)
  */
 void call_rcu_after_fork_parent(void)
 {
+       struct call_rcu_data *crdp;
+
+       cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
+               uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
        call_rcu_unlock(&call_rcu_mutex);
 }
 
@@ -656,6 +765,10 @@ void call_rcu_after_fork_child(void)
        /* Release the mutex. */
        call_rcu_unlock(&call_rcu_mutex);
 
+       /* Do nothing when call_rcu() has not been used */
+       if (cds_list_empty(&call_rcu_data_list))
+               return;
+
        /*
         * Allocate a new default call_rcu_data structure in order
         * to get a working call_rcu thread to go with it.
@@ -663,7 +776,17 @@ void call_rcu_after_fork_child(void)
        default_call_rcu_data = NULL;
        (void)get_default_call_rcu_data();
 
-       /* Dispose of all of the rest of the call_rcu_data structures. */
+       /* Clean up call_rcu_data pointers before use. */
+       maxcpus_reset();
+       free(per_cpu_call_rcu_data);
+       rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
+       thread_call_rcu_data = NULL;
+
+       /*
+        * Dispose of all of the rest of the call_rcu_data structures.
+        * Leftover call_rcu callbacks will be merged into the new
+        * default call_rcu thread queue.
+        */
        cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
                if (crdp == default_call_rcu_data)
                        continue;