rculfhash: reinstate i - 1 for shrink
[urcu.git] / rculfhash.c
index 44a589b03361a33c7625d1b336f05da77d6b1bca..0b3996416c270072b069fe700764515165bc50b5 100644
@@ -211,6 +211,7 @@ struct cds_lfht {
        cds_lfht_hash_fct hash_fct;
        cds_lfht_compare_fct compare_fct;
        unsigned long hash_seed;
+       int flags;
        pthread_mutex_t resize_mutex;   /* resize mutex: add/del mutex */
        unsigned int in_progress_resize, in_progress_destroy;
        void (*cds_lfht_call_rcu)(struct rcu_head *head,
@@ -577,6 +578,8 @@ void check_resize(struct cds_lfht *ht, struct rcu_table *t,
 {
        unsigned long count;
 
+       if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+               return;
        count = uatomic_read(&ht->count);
        /*
         * Use bucket-local length for small table expand and for
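
For reference, the CDS_LFHT_AUTO_RESIZE flag tested above belongs to the
public API side (urcu/rculfhash.h). A minimal sketch of such a declaration;
the exact bit value here is an assumption, not taken from this commit:

enum {
	CDS_LFHT_AUTO_RESIZE = (1U << 0),	/* table may grow/shrink on its own */
};
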
@@ -660,11 +663,22 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 {
        struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
 
+       assert(!is_dummy(dummy));
+       assert(!is_removed(dummy));
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
        for (;;) {
                iter_prev = dummy;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+               /*
+                * We should never be called with dummy (start of chain)
+                * and logically removed node (end of path compression
+                * marker) actually being the same node. That would be
+                * a bug in the algorithm implementation.
+                */
+               assert(dummy != node);
                for (;;) {
                        if (unlikely(!clear_flag(iter)))
                                return;
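
The new asserts rely on rculfhash.c encoding node state in the low bits
of the next pointer. A minimal sketch of such tag helpers, with the bit
assignments being illustrative assumptions rather than copies from this
commit:

#define REMOVED_FLAG	(1UL << 0)	/* node is logically deleted */
#define DUMMY_FLAG	(1UL << 1)	/* node is a bucket (dummy) node */
#define FLAGS_MASK	((1UL << 2) - 1)

/* Strip the tag bits to recover the real pointer. */
static struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

/* Tag a successor pointer as belonging to a logically deleted node. */
static struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static int is_dummy(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}
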
@@ -694,6 +708,8 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
        struct _cds_lfht_node *lookup;
        unsigned long hash, index, order;
 
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
        if (!t->size) {
                assert(dummy);
                node->p.next = flag_dummy(NULL);
@@ -715,8 +731,10 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
                iter = rcu_dereference(iter_prev->p.next);
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
                for (;;) {
+                       /* TODO: check if removed */
                        if (unlikely(!clear_flag(iter)))
                                goto insert;
+                       /* TODO: check if removed */
                        if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
                                goto insert;
                        next = rcu_dereference(clear_flag(iter)->p.next);
@@ -738,6 +756,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
        insert:
                assert(node != clear_flag(iter));
                assert(!is_removed(iter_prev));
+               assert(!is_removed(iter));
                assert(iter_prev != node);
                if (!dummy)
                        node->p.next = clear_flag(iter);
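
The insert branch above publishes the new node with a single cmpxchg on
its predecessor. A simplified view (the dummy-flag propagation shown in
the hunk is elided here):

/* Point the new node at its successor, then atomically swing
 * iter_prev->p.next from iter to node. A failed cmpxchg means a
 * concurrent add/remove touched the link; the outer loop retries the
 * traversal from the bucket's dummy node. */
node->p.next = clear_flag(iter);
if (uatomic_cmpxchg(&iter_prev->p.next, iter, node) != iter)
	continue;	/* lost the race, retry */
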
@@ -781,6 +800,8 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
        unsigned long hash, index, order;
 
        /* logically delete the node */
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
        old = rcu_dereference(node->p.next);
        do {
                next = old;
@@ -797,9 +818,6 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
        /* We performed the (logical) deletion. */
        flagged = 1;
 
-       if (dummy_removal)
-               node = clear_flag(node);
-
        /*
         * Ensure that the node is not visible to readers anymore: lookup for
         * the node, and remove it (along with any other logically removed node)
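
Removal is two-phase: the loop above first publishes the REMOVED tag on
node->p.next (the logical deletion), and only afterwards looks the node
up to unlink it physically. A simplified sketch of that retry loop,
reusing the tag helpers sketched earlier and the -ENOENT convention of
_cds_lfht_remove:

/* Phase 1: atomically tag node->p.next with REMOVED_FLAG so that
 * traversals skip this node. Retry until our cmpxchg wins, or bail
 * out if another thread already performed the logical deletion. */
static int logically_delete(struct cds_lfht_node *node)
{
	struct cds_lfht_node *old, *next;

	old = rcu_dereference(node->p.next);
	do {
		next = old;
		if (is_removed(next))
			return -ENOENT;	/* already logically deleted */
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);
	return 0;	/* we own the deletion; gc may now unlink */
}
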
@@ -879,18 +897,22 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
 
                len = !i ? 1 : 1UL << (i - 1);
                dbg_printf("fini order %lu len: %lu\n", i, len);
-               /* Update table size */
+               /*
+                * Update table size. The table must be shrunk before
+                * removal, so that gc lookups only traverse dummy
+                * nodes that are not logically removed.
+                */
                t->size = 1UL << (i - 1);
                /* Unlink */
                for (j = 0; j < len; j++) {
-                       struct cds_lfht_node *new_node =
+                       struct cds_lfht_node *fini_node =
                                (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
 
                        dbg_printf("fini entry: i %lu j %lu hash %lu\n",
                                   i, j, !i ? 0 : (1UL << (i - 1)) + j);
-                       new_node->p.reverse_hash =
+                       fini_node->p.reverse_hash =
                                bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
-                       (void) _cds_lfht_remove(ht, t, new_node, 1);
+                       (void) _cds_lfht_remove(ht, t, fini_node, 1);
                        if (CMM_LOAD_SHARED(ht->in_progress_destroy))
                                break;
                }
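
To see the reinstated i - 1 from the commit title at work: for i >= 1,
the order-i level holds the dummy nodes whose hash indices fall in
[2^(i-1), 2^i), so both len and the shrunken t->size evaluate to
1UL << (i - 1). A worked example:

/* fini_table() pass for order i = 4, following the code above:
 *   len     = 1UL << (4 - 1) = 8     dummy nodes in this level
 *   t->size = 1UL << (4 - 1) = 8     shrunk BEFORE unlinking, so gc
 *                                    lookups only walk live dummies
 *   hashes  = (1UL << 3) + j for j = 0..7, i.e. indices 8..15
 * Special case i = 0: len = 1 and the single hash index is 0. */
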
@@ -907,6 +929,7 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
                        cds_lfht_compare_fct compare_fct,
                        unsigned long hash_seed,
                        unsigned long init_size,
+                       int flags,
                        void (*cds_lfht_call_rcu)(struct rcu_head *head,
                                        void (*func)(struct rcu_head *head)),
                        void (*cds_lfht_synchronize_rcu)(void))
@@ -931,6 +954,7 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
        ht->t = calloc(1, sizeof(struct rcu_table)
                       + (order * sizeof(struct rcu_level *)));
        ht->t->size = 0;
+       ht->flags = flags;
        pthread_mutex_lock(&ht->resize_mutex);
        init_table(ht, ht->t, 0, order);
        pthread_mutex_unlock(&ht->resize_mutex);
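
With the new flags parameter, callers opt in to automatic resizing
explicitly. A hypothetical usage sketch: my_hash and my_compare are
placeholder functions, the seed is arbitrary, and the two RCU callbacks
come from the caller's urcu flavor:

struct cds_lfht *ht;

/* Auto-resizing table; passing 0 instead of CDS_LFHT_AUTO_RESIZE
 * yields a fixed-size table that ignores check_resize() and the
 * lazy per-count resize requests. */
ht = cds_lfht_new(my_hash, my_compare, 0x42UL /* seed */,
		  1 /* init_size */,
		  CDS_LFHT_AUTO_RESIZE,
		  call_rcu, synchronize_rcu);
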
@@ -1175,6 +1199,7 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
        memcpy(&new_t->tbl, &old_t->tbl,
               new_order * sizeof(struct rcu_level *));
        new_t->size = !new_order ? 1 : (1UL << (new_order - 1));
+       assert(new_t->size == new_size);
        new_t->resize_target = new_t->size;
        new_t->resize_initiated = 0;
 
@@ -1182,9 +1207,10 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
        rcu_assign_pointer(ht->t, new_t);
 
        /*
-        * We need to wait for all reader threads to reach Q.S. (and
+        * We need to wait for all add operations to reach Q.S. (and
         * thus use the new table for lookups) before we can start
-        * releasing the old dummy nodes.
+        * releasing the old dummy nodes. Otherwise their lookup will
+        * return a logically removed node as insert position.
         */
        ht->cds_lfht_synchronize_rcu();
 
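
The shrink path therefore follows a strict publish, wait, reclaim
ordering; in outline:

/* Ordering enforced above (outline):
 *   1. rcu_assign_pointer(ht->t, new_t);  publish the smaller table
 *   2. ht->cds_lfht_synchronize_rcu();    wait until every add that
 *                                         started on the old table is done
 *   3. fini_table(...);                   now safe to logically remove
 *                                         the old dummy nodes */
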
@@ -1221,19 +1247,18 @@ unsigned long resize_target_update(struct rcu_table *t,
 }
 
 static
-unsigned long resize_target_update_count(struct rcu_table *t,
-                                  unsigned long count)
+void resize_target_update_count(struct rcu_table *t,
+                               unsigned long count)
 {
        count = max(count, MIN_TABLE_SIZE);
-       return uatomic_set(&t->resize_target, count);
+       uatomic_set(&t->resize_target, count);
 }
 
 void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
 {
        struct rcu_table *t = rcu_dereference(ht->t);
-       unsigned long target_size;
 
-       target_size = resize_target_update_count(t, new_size);
+       resize_target_update_count(t, new_size);
        CMM_STORE_SHARED(t->resize_initiated, 1);
        pthread_mutex_lock(&ht->resize_mutex);
        _do_cds_lfht_resize(ht);
@@ -1279,9 +1304,10 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
                                unsigned long count)
 {
        struct rcu_resize_work *work;
-       unsigned long target_size;
 
-       target_size = resize_target_update_count(t, count);
+       if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+               return;
+       resize_target_update_count(t, count);
        if (!CMM_LOAD_SHARED(t->resize_initiated)) {
                uatomic_inc(&ht->in_progress_resize);
                cmm_smp_mb();   /* increment resize count before calling it */
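
Past the end of this hunk, cds_lfht_resize_lazy_count() queues the
actual work; the deferred-resize pattern it drives, in outline (details
below the hunk are an assumption based on the fields visible above):

/* 1. resize_target_update_count() records the requested size;
 * 2. if no resize is in flight, bump ht->in_progress_resize and hand
 *    a struct rcu_resize_work item to ht->cds_lfht_call_rcu(), so a
 *    worker resizes the table off the fast path;
 * 3. CMM_STORE_SHARED(t->resize_initiated, 1) debounces duplicates. */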