Fix: urcu-bp interaction with threads vs constructors/destructors
author		Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
		Sun, 8 Dec 2013 15:31:04 +0000 (10:31 -0500)
committer	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
		Sun, 8 Dec 2013 15:37:12 +0000 (10:37 -0500)
Add a reference counter for threads using urcu-bp, thus ensuring that
even if the urcu-bp destructor is executed before every thread using RCU
read-side critical sections has exited, those threads will not see a
corrupted thread list.
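
The shape of the fix is a constructor/destructor reference count: the
library constructor takes a reference, each thread presumably takes one
when it registers, and each drops its reference on unregistration (as
rcu_bp_unregister() now calls rcu_bp_exit()); the registry and the
pthread key are only torn down when the count reaches zero. A minimal
sketch of that pattern, with illustrative lib_* names rather than the
library's own API:

	static int lib_refcount;
	static pthread_mutex_t lib_lock = PTHREAD_MUTEX_INITIALIZER;

	static void lib_get(void)
	{
		/* Called from the constructor and from each thread registration. */
		pthread_mutex_lock(&lib_lock);
		if (!lib_refcount++)
			lib_setup();	/* hypothetical: create pthread key, ... */
		pthread_mutex_unlock(&lib_lock);
	}

	static void lib_put(void)
	{
		/* Called from the destructor and from each thread unregistration. */
		pthread_mutex_lock(&lib_lock);
		if (!--lib_refcount)
			lib_teardown();	/* hypothetical: unmap registry, delete key */
		pthread_mutex_unlock(&lib_lock);
	}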

Also, don't use URCU_TLS() within urcu_bp_thread_exit_notifier(). It
appears that this is racy (although the race was probably due to the
issue fixed by the reference counting above). Either way, play it safe
and pass the rcu_key received as a parameter instead.
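
For reference, the value handed to a pthread_key destructor is exactly
what the thread stored with pthread_setspecific(), so the notifier never
needs a thread-local lookup. A minimal standalone sketch of that
convention (not urcu-bp code):

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_key_t key;

	/* Runs at thread exit; 'value' is whatever this thread stored with
	 * pthread_setspecific(), handed over directly by the pthreads
	 * runtime, so no TLS lookup is needed here. */
	static void thread_exit_notifier(void *value)
	{
		free(value);
	}

	static void *worker(void *arg)
	{
		(void) arg;
		pthread_setspecific(key, malloc(64));	/* per-thread state */
		return NULL;				/* notifier runs now */
	}

	int main(void)
	{
		pthread_t tid;

		if (pthread_key_create(&key, thread_exit_notifier))
			abort();
		pthread_create(&tid, NULL, worker, NULL);
		pthread_join(tid, NULL);
		return 0;
	}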

These issues only reproduce when threads are still active at the time
the urcu-bp destructor is called; a minimal reproducer is sketched below.
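
Roughly, such a reproducer looks like the following sketch (assuming the
usual urcu-bp mapping where <urcu-bp.h> provides rcu_read_lock() and
rcu_read_unlock(), and linking with -lurcu-bp). main() returns without
joining, so process destructors, including urcu-bp's, run while the
reader thread is still registered:

	#include <pthread.h>
	#include <urcu-bp.h>

	/* Reader thread: urcu-bp registers it lazily on its first read lock. */
	static void *reader(void *arg)
	{
		(void) arg;
		for (;;) {
			rcu_read_lock();
			/* ... read RCU-protected data ... */
			rcu_read_unlock();
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t tid;

		pthread_create(&tid, NULL, reader, NULL);
		/* Return immediately without joining: library destructors run
		 * while the reader thread is still active and registered. */
		return 0;
	}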

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
urcu-bp.c

index 0963dd05096d8ac5bc7f3c93520f064f75592a80..59cc4bc8c76b7833d3e7573b4bb7f53802a28f8f 100644
--- a/urcu-bp.c
+++ b/urcu-bp.c
@@ -91,6 +91,9 @@ void *mremap_wrapper(void *old_address, size_t old_size,
  */
 #define RCU_QS_ACTIVE_ATTEMPTS 100
 
+static
+int rcu_bp_refcount;
+
 static
 void __attribute__((constructor)) rcu_bp_init(void);
 static
@@ -429,11 +432,8 @@ struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
 
 /* Called with signals off and mutex locked */
 static
-void remove_thread(void)
+void remove_thread(struct rcu_reader *rcu_reader_reg)
 {
-       struct rcu_reader *rcu_reader_reg;
-
-       rcu_reader_reg = URCU_TLS(rcu_reader);
        cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
        URCU_TLS(rcu_reader) = NULL;
 }
@@ -474,7 +474,7 @@ end:
 
 /* Disable signals, take mutex, remove from registry */
 static
-void rcu_bp_unregister(void)
+void rcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
 {
        sigset_t newmask, oldmask;
        int ret;
@@ -487,11 +487,12 @@ void rcu_bp_unregister(void)
                abort();
 
        mutex_lock(&rcu_gp_lock);
-       remove_thread();
+       remove_thread(rcu_reader_reg);
        mutex_unlock(&rcu_gp_lock);
        ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
        if (ret)
                abort();
+       rcu_bp_exit();
 }
 
 /*
@@ -501,15 +502,14 @@ void rcu_bp_unregister(void)
 static
 void urcu_bp_thread_exit_notifier(void *rcu_key)
 {
-       assert(rcu_key == URCU_TLS(rcu_reader));
-       rcu_bp_unregister();
+       rcu_bp_unregister(rcu_key);
 }
 
 static
 void rcu_bp_init(void)
 {
        mutex_lock(&init_lock);
-       if (!initialized) {
+       if (!rcu_bp_refcount++) {
                int ret;
 
                ret = pthread_key_create(&urcu_bp_key,
@@ -524,16 +524,21 @@ void rcu_bp_init(void)
 static
 void rcu_bp_exit(void)
 {
-       struct registry_chunk *chunk, *tmp;
-       int ret;
+       mutex_lock(&init_lock);
+       if (!--rcu_bp_refcount) {
+               struct registry_chunk *chunk, *tmp;
+               int ret;
 
-       cds_list_for_each_entry_safe(chunk, tmp,
-                       &registry_arena.chunk_list, node) {
-               munmap(chunk, chunk->data_len + sizeof(struct registry_chunk));
+               cds_list_for_each_entry_safe(chunk, tmp,
+                               &registry_arena.chunk_list, node) {
+                       munmap(chunk, chunk->data_len
+                                       + sizeof(struct registry_chunk));
+               }
+               ret = pthread_key_delete(urcu_bp_key);
+               if (ret)
+                       abort();
        }
-       ret = pthread_key_delete(urcu_bp_key);
-       if (ret)
-               abort();
+       mutex_unlock(&init_lock);
 }
 
 /*