rculfhash: break in-progress resize when target size change (between levels)
diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index b9e57cd60a2ba731a2030cd36c9b9b6c211c416e..c14cc18bde825c6c3cfec1557222e764c88f85a9 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -215,6 +215,11 @@ static void *call_rcu_thread(void *arg)
                exit(-1);
        }
 
+       /*
+        * If callbacks take a read-side lock, we need to be registered.
+        */
+       rcu_register_thread();
+
        thread_call_rcu_data = crdp;
        if (!rt) {
                uatomic_dec(&crdp->futex);
@@ -247,6 +252,7 @@ static void *call_rcu_thread(void *arg)
                }
                if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
                        break;
+               rcu_thread_offline();
                if (!rt) {
                        if (&crdp->cbs.head
                            == _CMM_LOAD_SHARED(crdp->cbs.tail)) {
@@ -264,6 +270,7 @@ static void *call_rcu_thread(void *arg)
                } else {
                        poll(NULL, 0, 10);
                }
+               rcu_thread_online();
        }
        if (!rt) {
                /*
@@ -273,6 +280,7 @@ static void *call_rcu_thread(void *arg)
                uatomic_set(&crdp->futex, 0);
        }
        uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
+       rcu_unregister_thread();
        return NULL;
 }
 
@@ -474,7 +482,9 @@ void set_thread_call_rcu_data(struct call_rcu_data *crdp)
 /*
  * Create a separate call_rcu thread for each CPU.  This does not
  * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
- * function if you want that behavior.
+ * function if you want that behavior. Should be paired with
+ * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
+ * threads.
  */
 
 int create_all_cpu_call_rcu_data(unsigned long flags)
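
Below is a minimal caller-side sketch of why these two changes belong together: the
worker threads spawned by create_all_cpu_call_rcu_data() must be RCU-registered because
a callback may take a read-side lock, and the create call should later be paired with
free_all_cpu_call_rcu_data(). The node type, the callback, and the update sequence are
hypothetical illustrations, not part of the patch, and header names vary across liburcu
releases (newer releases install them under the urcu/ prefix).

/*
 * Minimal sketch (not part of the patch).  Link against liburcu;
 * older releases ship call_rcu support in a separate liburcu-call-rcu.
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* default urcu flavor */
#include <urcu-call-rcu.h>	/* call_rcu(), create_all_cpu_call_rcu_data() */
#include <urcu/compiler.h>	/* caa_container_of() */

/* Hypothetical RCU-protected node reclaimed through call_rcu(). */
struct mynode {
	int value;
	struct rcu_head rcu;
};

static struct mynode *global_node;

/*
 * Runs in a call_rcu worker thread.  It may take a read-side lock,
 * which is only legal because the worker thread registered itself
 * (the rcu_register_thread() call added by this patch).
 */
static void free_node_cb(struct rcu_head *head)
{
	struct mynode *node = caa_container_of(head, struct mynode, rcu);

	rcu_read_lock();
	/* ... dereference other RCU-protected data here if needed ... */
	rcu_read_unlock();
	free(node);
}

int main(void)
{
	struct mynode *node, *old;

	rcu_register_thread();

	/* One call_rcu worker thread per CPU. */
	if (create_all_cpu_call_rcu_data(0))
		fprintf(stderr, "create_all_cpu_call_rcu_data failed\n");

	node = calloc(1, sizeof(*node));
	node->value = 42;
	rcu_assign_pointer(global_node, node);

	/* Replace the node and defer reclamation of the old copy. */
	old = node;
	node = calloc(1, sizeof(*node));
	node->value = 43;
	rcu_assign_pointer(global_node, node);
	call_rcu(&old->rcu, free_node_cb);

	/* Pair the create call with the matching teardown. */
	free_all_cpu_call_rcu_data();

	rcu_unregister_thread();
	free(node);
	return 0;
}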