rculfhash: wait for call_rcu quiescence before destroy
diff --git a/rculfhash.c b/rculfhash.c
index 1ccd3f15a5b8be89b8ca4fce5b34f1e588e9473e..a3223f37f046021cb6c3c0119e107e2e4dde4fd0 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -67,6 +67,7 @@ struct rcu_ht {
        ht_compare_fct compare_fct;
        unsigned long hash_seed;
        pthread_mutex_t resize_mutex;   /* resize mutex: add/del mutex */
+       unsigned int in_progress_resize;
        void (*ht_call_rcu)(struct rcu_head *head,
                      void (*func)(struct rcu_head *head));
 };
@@ -234,14 +235,16 @@ void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
 }
 
 static
-int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node,
-            int unique)
+struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
+                           struct rcu_ht_node *node, int unique)
 {
        struct rcu_ht_node *iter_prev, *dummy, *iter, *next;
        unsigned long hash;
 
-       if (!t->size)
-               return 0;
+       if (!t->size) {
+               assert(node->dummy);
+               return node;    /* Initial first add (head) */
+       }
        hash = bit_reverse_ulong(node->reverse_hash);
        for (;;) {
                uint32_t chain_len = 0;
@@ -267,7 +270,7 @@ int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node,
                            && !ht->compare_fct(node->key, node->key_len,
                                                clear_flag(iter)->key,
                                                clear_flag(iter)->key_len))
-                               return -EEXIST;
+                               return clear_flag(iter);
                        /* Only account for identical reverse hash once */
                        if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
                                check_resize(ht, t, ++chain_len);
@@ -293,7 +296,7 @@ gc_end:
        /* Garbage collect logically removed nodes in the bucket */
        dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
        _ht_gc_bucket(dummy, node);
-       return 0;
+       return node;
 }
 
 static
@@ -371,6 +374,7 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct,
        ht->compare_fct = compare_fct;
        ht->hash_seed = hash_seed;
        ht->ht_call_rcu = ht_call_rcu;
+       ht->in_progress_resize = 0;
        /* this mutex should not nest in read-side C.S. */
        pthread_mutex_init(&ht->resize_mutex, NULL);
        ht->t = calloc(1, sizeof(struct rcu_table)
@@ -423,7 +427,7 @@ void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
        (void) _ht_add(ht, t, node, 0);
 }
 
-int ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
+struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
 {
        struct rcu_table *t;
        unsigned long hash;
@@ -475,6 +479,9 @@ int ht_destroy(struct rcu_ht *ht)
 {
        int ret;
 
+       /* Wait for in-flight resize operations to complete */
+       while (uatomic_read(&ht->in_progress_resize))
+               poll(NULL, 0, 100);     /* wait for 100ms */
        ret = ht_delete_dummy(ht);
        if (ret)
                return ret;
@@ -574,6 +581,8 @@ void do_resize_cb(struct rcu_head *head)
        _do_ht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
        free(work);
+       cmm_smp_mb();   /* finish resize before decrement */
+       uatomic_dec(&ht->in_progress_resize);
 }
 
 static
@@ -584,6 +593,8 @@ void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
 
        target_size = resize_target_update(t, growth);
        if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+               uatomic_inc(&ht->in_progress_resize);
+               cmm_smp_mb();   /* increment resize count before calling it */
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->ht_call_rcu(&work->head, do_resize_cb);
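
For illustration, a hypothetical caller-side sketch (not part of this patch) of the new ht_add_unique() return convention: instead of testing for -EEXIST, the caller compares the returned pointer with the node it tried to insert; on a key collision, the node already carrying that key is returned instead.

       struct rcu_ht_node *old;

       /* 'node' is assumed to be allocated and initialized with its key. */
       old = ht_add_unique(ht, node);
       if (old != node) {
               /* Duplicate key: 'old' is the node already in the table. */
               free(node);     /* application policy: discard or reuse */
       }

Note that ht_destroy() now polls in_progress_resize every 100 ms until pending resize callbacks have run, so invoking it from the call_rcu worker thread (or from a context that blocks that thread) would presumably deadlock.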