rculfhash: break in-progress resize when target size change (between levels)
diff --git a/rculfhash.c b/rculfhash.c
index c3a032e20d26a35c09d1f1b3c466432206ecbd32..1288c78d585f983764da0c96eefb6df4f5712cc3 100644
 #define dbg_printf(fmt, args...)
 #endif
 
-/* For testing */
-#define POISON_FREE
-
 /*
  * Per-CPU split-counters lazily update the global counter every 1024
  * additions/removals. They automatically keep track of when a resize
  * is required.
 #define CHAIN_LEN_RESIZE_THRESHOLD     3
 
 /*
- * Define the minimum table size. Protects against hash table resize overload
- * when too many entries are added quickly before the resize can complete.
- * This is especially the case if the table could be shrinked to a size of 1.
- * TODO: we might want to make the add/remove operations help the resize to
- * add or remove dummy nodes when a resize is ongoing to ensure upper-bound on
- * chain length.
+ * Define the minimum table size.
  */
 #define MIN_TABLE_SIZE                 128
 
+#if (CAA_BITS_PER_LONG == 32)
+#define MAX_TABLE_ORDER                        32
+#else
+#define MAX_TABLE_ORDER                        64
+#endif
+
+#ifndef min
+#define min(a, b)      ((a) < (b) ? (a) : (b))
+#endif
+
 #ifndef max
 #define max(a, b)      ((a) > (b) ? (a) : (b))
 #endif
@@ -202,19 +204,25 @@ struct rcu_level {
 };
 
 struct rcu_table {
-       unsigned long size;     /* always a power of 2 */
+       unsigned long size;     /* always a power of 2, shared (RCU) */
        unsigned long resize_target;
        int resize_initiated;
-       struct rcu_head head;
-       struct rcu_level *tbl[0];
+       struct rcu_level *tbl[MAX_TABLE_ORDER];
 };
 
 struct cds_lfht {
-       struct rcu_table *t;            /* shared */
+       struct rcu_table t;
        cds_lfht_hash_fct hash_fct;
        cds_lfht_compare_fct compare_fct;
        unsigned long hash_seed;
        int flags;
+       /*
+        * We need to put the work threads offline (QSBR) when taking this
+        * mutex, because we use synchronize_rcu within this mutex critical
+        * section, which waits on read-side critical sections, and could
+        * therefore cause grace-period deadlock if we hold off RCU G.P.
+        * completion.
+        */
        pthread_mutex_t resize_mutex;   /* resize mutex: add/del mutex */
        unsigned int in_progress_resize, in_progress_destroy;
        void (*cds_lfht_call_rcu)(struct rcu_head *head,
@@ -222,6 +230,8 @@ struct cds_lfht {
        void (*cds_lfht_synchronize_rcu)(void);
        void (*cds_lfht_rcu_read_lock)(void);
        void (*cds_lfht_rcu_read_unlock)(void);
+       void (*cds_lfht_rcu_thread_offline)(void);
+       void (*cds_lfht_rcu_thread_online)(void);
        unsigned long count;            /* global approximate item count */
        struct ht_items_count *percpu_count;    /* per-cpu item count */
 };
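
As a reading aid: with the level array now embedded as tbl[MAX_TABLE_ORDER] in
struct cds_lfht, growing the table only fills in higher-order level pointers
and never moves existing levels, so a resize no longer needs to allocate and
RCU-publish a new table header. Below is a minimal, self-contained sketch of
the index-to-level mapping used throughout this patch; the count_order()
helper is a local stand-in for the library's get_count_order_ulong().

    #include <stdio.h>

    /* Local stand-in for the library's get_count_order_ulong(). */
    static unsigned long count_order(unsigned long x)
    {
            unsigned long order = 0;

            while ((1UL << order) < x)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long index;

            /*
             * Level 0 holds bucket 0; level i (i >= 1) holds the 2^(i-1)
             * buckets whose indexes fall in [2^(i-1), 2^i).
             */
            for (index = 0; index < 16; index++) {
                    unsigned long order = count_order(index + 1);
                    unsigned long offset = index &
                            ((!order ? 0 : (1UL << (order - 1))) - 1);

                    printf("index %2lu -> tbl[%lu]->nodes[%lu]\n",
                           index, order, offset);
            }
            return 0;
    }
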
@@ -231,6 +241,12 @@ struct rcu_resize_work {
        struct cds_lfht *ht;
 };
 
+static
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
+                               unsigned long size,
+                               struct cds_lfht_node *node,
+                               int unique, int dummy);
+
 /*
  * Algorithm to reverse bits in a word by lookup table, extended to
  * 64-bit words.
@@ -434,7 +450,7 @@ int get_count_order_ulong(unsigned long x)
 #endif
 
 static
-void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
+void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);
 
 /*
  * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
@@ -445,7 +461,7 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
 #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
 
 static
-void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
                                unsigned long count);
 
 static long nr_cpus_mask = -1;
@@ -500,7 +516,7 @@ int ht_get_cpu(void)
 }
 
 static
-void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+void ht_count_add(struct cds_lfht *ht, unsigned long size)
 {
        unsigned long percpu_count;
        int cpu;
@@ -519,18 +535,17 @@ void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
                                           1UL << COUNT_COMMIT_ORDER);
                /* If power of 2 */
                if (!(count & (count - 1))) {
-                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
-                                       < t->size)
+                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
                                return;
                        dbg_printf("add set global %lu\n", count);
-                       cds_lfht_resize_lazy_count(ht, t,
+                       cds_lfht_resize_lazy_count(ht, size,
                                count >> (CHAIN_LEN_TARGET - 1));
                }
        }
 }
 
 static
-void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+void ht_count_remove(struct cds_lfht *ht, unsigned long size)
 {
        unsigned long percpu_count;
        int cpu;
@@ -549,11 +564,10 @@ void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
                                           -(1UL << COUNT_COMMIT_ORDER));
                /* If power of 2 */
                if (!(count & (count - 1))) {
-                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
-                                       >= t->size)
+                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
                                return;
                        dbg_printf("remove set global %lu\n", count);
-                       cds_lfht_resize_lazy_count(ht, t,
+                       cds_lfht_resize_lazy_count(ht, size,
                                count >> (CHAIN_LEN_TARGET - 1));
                }
        }
@@ -575,12 +589,12 @@ void free_per_cpu_items_count(struct ht_items_count *count)
 }
 
 static
-void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+void ht_count_add(struct cds_lfht *ht, unsigned long size)
 {
 }
 
 static
-void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+void ht_count_remove(struct cds_lfht *ht, unsigned long size)
 {
 }
 
@@ -588,8 +602,7 @@ void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
 
 
 static
-void check_resize(struct cds_lfht *ht, struct rcu_table *t,
-                 uint32_t chain_len)
+void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
 {
        unsigned long count;
 
@@ -606,7 +619,7 @@ void check_resize(struct cds_lfht *ht, struct rcu_table *t,
                dbg_printf("WARNING: large chain length: %u.\n",
                           chain_len);
        if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
-               cds_lfht_resize_lazy(ht, t,
+               cds_lfht_resize_lazy(ht, size,
                        get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
 }
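
A note on the growth heuristic above (unchanged by this patch except that it
now receives the size instead of the table pointer): a bucket chain of length
chain_len requests growth by get_count_order_u32(chain_len -
(CHAIN_LEN_TARGET - 1)) orders. A self-contained sketch follows, assuming
CHAIN_LEN_TARGET is 1 (its definition is not visible in this hunk), with a
local stand-in for get_count_order_u32():

    #include <stdio.h>

    /* Assumed value; not shown in this hunk. */
    #define CHAIN_LEN_TARGET                1
    #define CHAIN_LEN_RESIZE_THRESHOLD      3

    /* Local stand-in for the library's get_count_order_u32(). */
    static unsigned int count_order_u32(unsigned int x)
    {
            unsigned int order = 0;

            while ((1U << order) < x)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned int chain_len;

            for (chain_len = CHAIN_LEN_RESIZE_THRESHOLD; chain_len <= 10;
                            chain_len++)
                    printf("chain_len %2u -> grow by %u order(s)\n",
                           chain_len,
                           count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
            return 0;
    }
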
 
@@ -654,14 +667,6 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
        return v;
 }
 
-static
-void cds_lfht_free_table_cb(struct rcu_head *head)
-{
-       struct rcu_table *t =
-               caa_container_of(head, struct rcu_table, head);
-       poison_free(t);
-}
-
 static
 void cds_lfht_free_level(struct rcu_head *head)
 {
@@ -674,23 +679,27 @@ void cds_lfht_free_level(struct rcu_head *head)
  * Remove all logically deleted nodes from a bucket up to a certain node key.
  */
 static
-void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
+int _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
 {
        struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
-       struct cds_lfht_node *iter_trace[64];
-       unsigned long trace_idx = 0;
 
-       memset(iter_trace, 0, sizeof(iter_trace));
        assert(!is_dummy(dummy));
        assert(!is_removed(dummy));
        assert(!is_dummy(node));
        assert(!is_removed(node));
        for (;;) {
-               iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x1;
                iter_prev = dummy;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
-               iter_trace[trace_idx++ & (64 - 1)] = iter;
+               if (unlikely(iter == NULL)) {
+                       /*
+                        * We are executing concurrently with a hash table
+                        * expand, so we see a dummy node with NULL next value.
+                        * Tell the caller, which will help the expand by
+                        * linking this dummy node into the list, then retry.
+                        */
+                       return 1;
+               }
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
                /*
                 * We should never be called with dummy (start of chain)
@@ -701,15 +710,14 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
                assert(dummy != node);
                for (;;) {
                        if (unlikely(!clear_flag(iter)))
-                               return;
+                               return 0;
                        if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
-                               return;
+                               return 0;
                        next = rcu_dereference(clear_flag(iter)->p.next);
                        if (likely(is_removed(next)))
                                break;
                        iter_prev = clear_flag(iter);
                        iter = next;
-                       iter_trace[trace_idx++ & (64 - 1)] = iter;
                }
                assert(!is_removed(iter));
                if (is_dummy(iter))
@@ -717,13 +725,15 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
                else
                        new_next = clear_flag(next);
                (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
-               iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x2;
        }
+       return 0;
 }
 
 static
-struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
-                               struct cds_lfht_node *node, int unique, int dummy)
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
+                               unsigned long size,
+                               struct cds_lfht_node *node,
+                               int unique, int dummy)
 {
        struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
                        *dummy_node;
@@ -732,7 +742,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
 
        assert(!is_dummy(node));
        assert(!is_removed(node));
-       if (!t->size) {
+       if (!size) {
                assert(dummy);
                node->p.next = flag_dummy(NULL);
                return node;    /* Initial first add (head) */
@@ -745,18 +755,33 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
                 * iter_prev points to the non-removed node prior to the
                 * insert location.
                 */
-               index = hash & (t->size - 1);
+               index = hash & (size - 1);
                order = get_count_order_ulong(index + 1);
-               lookup = &t->tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
+               lookup = &ht->t.tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
                iter_prev = (struct cds_lfht_node *) lookup;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
+               if (unlikely(iter == NULL)) {
+                       /*
+                        * We are executing concurrently with a hash table
+                        * expand, so we see a dummy node with NULL next value.
+                        * Help expand by linking this node into the list and
+                        * retry.
+                        */
+                       (void) _cds_lfht_add(ht, size >> 1, iter_prev, 0, 1);
+                       continue;       /* retry */
+               }
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
                for (;;) {
-                       /* TODO: check if removed */
+                       /*
+                        * When adding a dummy node, we allow concurrent
+                        * add/removal to help. If we find the dummy node in
+                        * place, skip its insertion.
+                        */
+                       if (unlikely(dummy && clear_flag(iter) == node))
+                               return node;
                        if (unlikely(!clear_flag(iter)))
                                goto insert;
-                       /* TODO: check if removed */
                        if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
                                goto insert;
                        next = rcu_dereference(clear_flag(iter)->p.next);
@@ -771,7 +796,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
                        /* Only account for identical reverse hash once */
                        if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
                            && !is_dummy(next))
-                               check_resize(ht, t, ++chain_len);
+                               check_resize(ht, size, ++chain_len);
                        iter_prev = clear_flag(iter);
                        iter = next;
                }
@@ -804,17 +829,22 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
        }
 gc_end:
        /* Garbage collect logically removed nodes in the bucket */
-       index = hash & (t->size - 1);
+       index = hash & (size - 1);
        order = get_count_order_ulong(index + 1);
-       lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+       lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
        dummy_node = (struct cds_lfht_node *) lookup;
-       _cds_lfht_gc_bucket(dummy_node, node);
+       if (_cds_lfht_gc_bucket(dummy_node, node)) {
+               /* Help expand */
+               (void) _cds_lfht_add(ht, size >> 1, dummy_node, 0, 1);
+               goto gc_end;    /* retry */
+       }
        return node;
 }
 
 static
-int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
-               struct cds_lfht_node *node, int dummy_removal)
+int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size,
+               struct cds_lfht_node *node,
+               int dummy_removal)
 {
        struct cds_lfht_node *dummy, *next, *old;
        struct _cds_lfht_node *lookup;
@@ -845,13 +875,18 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
         * the node, and remove it (along with any other logically removed node)
         * if found.
         */
+gc_retry:
        hash = bit_reverse_ulong(node->p.reverse_hash);
-       assert(t->size > 0);
-       index = hash & (t->size - 1);
+       assert(size > 0);
+       index = hash & (size - 1);
        order = get_count_order_ulong(index + 1);
-       lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+       lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
        dummy = (struct cds_lfht_node *) lookup;
-       _cds_lfht_gc_bucket(dummy, node);
+       if (_cds_lfht_gc_bucket(dummy, node)) {
+               /* Help expand */
+               (void) _cds_lfht_add(ht, size >> 1, dummy, 0, 1);
+               goto gc_retry;  /* retry */
+       }
 end:
        /*
         * Only the flagging action indicated that we (and no other)
@@ -864,13 +899,54 @@ end:
                return -ENOENT;
 }
 
+static
+void init_table_hash(struct cds_lfht *ht, unsigned long i,
+               unsigned long len)
+{
+       unsigned long j;
+
+       for (j = 0; j < len; j++) {
+               struct cds_lfht_node *new_node =
+                       (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+
+               dbg_printf("init hash entry: i %lu j %lu hash %lu\n",
+                          i, j, !i ? 0 : (1UL << (i - 1)) + j);
+               new_node->p.reverse_hash =
+                       bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
+       }
+}
+
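
The reverse-hash initialization in init_table_hash() above is what keeps the
split-ordered list sorted: dummy j of level i covers hash value
(1UL << (i - 1)) + j, and list order is by bit-reversed hash, so each new
level's dummies interleave between the previous level's. A self-contained
sketch, with a simple loop-based reversal standing in for this file's
table-driven bit_reverse_ulong():

    #include <stdio.h>

    /* Simple stand-in for the file's table-driven bit_reverse_ulong(). */
    static unsigned long bit_reverse(unsigned long v)
    {
            unsigned long r = 0;
            unsigned int i;

            for (i = 0; i < 8 * sizeof(v); i++, v >>= 1)
                    r = (r << 1) | (v & 1);
            return r;
    }

    int main(void)
    {
            unsigned long i, j;

            /* Dummy j of level i covers hash (1UL << (i - 1)) + j. */
            for (i = 0; i < 4; i++) {
                    unsigned long len = !i ? 1 : 1UL << (i - 1);

                    for (j = 0; j < len; j++) {
                            unsigned long hash = !i ? 0 : (1UL << (i - 1)) + j;

                            printf("level %lu dummy %lu: hash %lu reverse %lx\n",
                                   i, j, hash, bit_reverse(hash));
                    }
            }
            return 0;
    }
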
 /*
  * Holding RCU read lock to protect _cds_lfht_add against memory
  * reclaim that could be performed by other call_rcu worker threads (ABA
  * problem).
  */
 static
-void init_table(struct cds_lfht *ht, struct rcu_table *t,
+void init_table_link(struct cds_lfht *ht, unsigned long i, unsigned long len)
+{
+       unsigned long j;
+
+       ht->cds_lfht_rcu_thread_online();
+       ht->cds_lfht_rcu_read_lock();
+       for (j = 0; j < len; j++) {
+               struct cds_lfht_node *new_node =
+                       (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+
+               dbg_printf("init link: i %lu j %lu hash %lu\n",
+                          i, j, !i ? 0 : (1UL << (i - 1)) + j);
+               (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
+                               new_node, 0, 1);
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
+       }
+       ht->cds_lfht_rcu_read_unlock();
+       ht->cds_lfht_rcu_thread_offline();
+}
+
+static
+void init_table(struct cds_lfht *ht,
                unsigned long first_order, unsigned long len_order)
 {
        unsigned long i, end_order;
@@ -878,45 +954,89 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t,
        dbg_printf("init table: first_order %lu end_order %lu\n",
                   first_order, first_order + len_order);
        end_order = first_order + len_order;
-       t->size = !first_order ? 0 : (1UL << (first_order - 1));
        for (i = first_order; i < end_order; i++) {
-               unsigned long j, len;
+               unsigned long len;
 
                len = !i ? 1 : 1UL << (i - 1);
                dbg_printf("init order %lu len: %lu\n", i, len);
-               t->tbl[i] = calloc(1, sizeof(struct rcu_level)
+
+               /* Stop expand if the resize target changes under us */
+               if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
+                       break;
+
+               ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
                                + (len * sizeof(struct _cds_lfht_node)));
-               ht->cds_lfht_rcu_read_lock();
-               for (j = 0; j < len; j++) {
-                       struct cds_lfht_node *new_node =
-                               (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
-
-                       dbg_printf("init entry: i %lu j %lu hash %lu\n",
-                                  i, j, !i ? 0 : (1UL << (i - 1)) + j);
-                       new_node->p.reverse_hash =
-                               bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
-                       (void) _cds_lfht_add(ht, t, new_node, 0, 1);
-                       if (CMM_LOAD_SHARED(ht->in_progress_destroy))
-                               break;
-               }
-               ht->cds_lfht_rcu_read_unlock();
-               /* Update table size */
-               t->size = !i ? 1 : (1UL << i);
-               dbg_printf("init new size: %lu\n", t->size);
+
+               /* Set all dummy nodes' reverse hash values for this level */
+               init_table_hash(ht, i, len);
+
+               /*
+                * Update table size. At this point, concurrent add/remove
+                * operations see the new dummy nodes with correctly
+                * initialized reverse hash values, but with NULL next
+                * pointers. When they encounter one, they help us link it
+                * into the list and retry their operation.
+                */
+               cmm_smp_wmb();  /* populate data before RCU size */
+               CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));
+
+               /*
+                * Link all dummy nodes into the table. Concurrent
+                * add/remove are helping us.
+                */
+               init_table_link(ht, i, len);
+
+               dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
                if (CMM_LOAD_SHARED(ht->in_progress_destroy))
                        break;
        }
-       t->resize_target = t->size;
-       t->resize_initiated = 0;
 }
 
 /*
  * Holding RCU read lock to protect _cds_lfht_remove against memory
  * reclaim that could be performed by other call_rcu worker threads (ABA
  * problem).
+ * For a single level, we logically remove and garbage collect each node.
+ *
+ * As a design choice, we perform logical removal and garbage collection on a
+ * node-per-node basis to simplify this algorithm. We also assume that the
+ * good cache locality of per-node operation outweighs the possible
+ * performance gain of batching garbage collection across multiple levels.
+ * However, this would have to be justified by benchmarks.
+ *
+ * Concurrent removal and add operations are helping us perform garbage
+ * collection of logically removed nodes. We guarantee that all logically
+ * removed nodes have been garbage-collected (unlinked) before call_rcu is
+ * invoked to free a whole level of dummy nodes (after a grace period).
+ *
+ * Logical removal and garbage collection can therefore be done in batches or
+ * on a node-per-node basis, as long as the guarantee above holds.
  */
 static
-void fini_table(struct cds_lfht *ht, struct rcu_table *t,
+void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
+{
+       unsigned long j;
+
+       ht->cds_lfht_rcu_thread_online();
+       ht->cds_lfht_rcu_read_lock();
+       for (j = 0; j < len; j++) {
+               struct cds_lfht_node *fini_node =
+                       (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+
+               dbg_printf("remove entry: i %lu j %lu hash %lu\n",
+                          i, j, !i ? 0 : (1UL << (i - 1)) + j);
+               fini_node->p.reverse_hash =
+                       bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+               (void) _cds_lfht_remove(ht, !i ? 0 : (1UL << (i - 1)),
+                               fini_node, 1);
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
+       }
+       ht->cds_lfht_rcu_read_unlock();
+       ht->cds_lfht_rcu_thread_offline();
+}
+
+static
+void fini_table(struct cds_lfht *ht,
                unsigned long first_order, unsigned long len_order)
 {
        long i, end_order;
@@ -925,40 +1045,41 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
                   first_order, first_order + len_order);
        end_order = first_order + len_order;
        assert(first_order > 0);
-       assert(t->size == (1UL << (end_order - 1)));
        for (i = end_order - 1; i >= first_order; i--) {
-               unsigned long j, len;
+               unsigned long len;
 
                len = !i ? 1 : 1UL << (i - 1);
                dbg_printf("fini order %lu len: %lu\n", i, len);
+
+               /* Stop shrink if the resize target changes under us */
+               if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
+                       break;
+
+               cmm_smp_wmb();  /* populate data before RCU size */
+               CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
+
                /*
-                * Update table size. Need to shrink this table prior to
-                * removal so gc lookups use non-logically-removed dummy
-                * nodes.
+                * We need to wait for all add operations to reach Q.S. (and
+                * thus use the new table for lookups) before we can start
+                * releasing the old dummy nodes. Otherwise their lookup will
+                * return a logically removed node as insert position.
                 */
-               t->size = 1UL << (i - 1);
-               /* Unlink */
-               ht->cds_lfht_rcu_read_lock();
-               for (j = 0; j < len; j++) {
-                       struct cds_lfht_node *fini_node =
-                               (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
-
-                       dbg_printf("fini entry: i %lu j %lu hash %lu\n",
-                                  i, j, !i ? 0 : (1UL << (i - 1)) + j);
-                       fini_node->p.reverse_hash =
-                               bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
-                       (void) _cds_lfht_remove(ht, t, fini_node, 1);
-                       if (CMM_LOAD_SHARED(ht->in_progress_destroy))
-                               break;
-               }
-               ht->cds_lfht_rcu_read_unlock();
-               ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
-               dbg_printf("fini new size: %lu\n", t->size);
+               ht->cds_lfht_synchronize_rcu();
+
+               /*
+                * Set "removed" flag in dummy nodes about to be removed.
+                * Unlink all now-logically-removed dummy node pointers.
+                * Concurrent add/remove operations help us perform
+                * the gc.
+                */
+               remove_table(ht, i, len);
+
+               ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level);
+
+               dbg_printf("fini new size: %lu\n", 1UL << i);
                if (CMM_LOAD_SHARED(ht->in_progress_destroy))
                        break;
        }
-       t->resize_target = t->size;
-       t->resize_initiated = 0;
 }
 
 struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
@@ -970,7 +1091,9 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
                                        void (*func)(struct rcu_head *head)),
                        void (*cds_lfht_synchronize_rcu)(void),
                        void (*cds_lfht_rcu_read_lock)(void),
-                       void (*cds_lfht_rcu_read_unlock)(void))
+                       void (*cds_lfht_rcu_read_unlock)(void),
+                       void (*cds_lfht_rcu_thread_offline)(void),
+                       void (*cds_lfht_rcu_thread_online)(void))
 {
        struct cds_lfht *ht;
        unsigned long order;
@@ -986,35 +1109,35 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
        ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
        ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
        ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
-       ht->in_progress_resize = 0;
+       ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
+       ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
        ht->percpu_count = alloc_per_cpu_items_count();
        /* this mutex should not nest in read-side C.S. */
        pthread_mutex_init(&ht->resize_mutex, NULL);
        order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
-       ht->t = calloc(1, sizeof(struct cds_lfht)
-                      + (order * sizeof(struct rcu_level *)));
-       ht->t->size = 0;
        ht->flags = flags;
+       ht->cds_lfht_rcu_thread_offline();
        pthread_mutex_lock(&ht->resize_mutex);
-       init_table(ht, ht->t, 0, order);
+       ht->t.resize_target = 1UL << (order - 1);
+       init_table(ht, 0, order);
        pthread_mutex_unlock(&ht->resize_mutex);
+       ht->cds_lfht_rcu_thread_online();
        return ht;
 }
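
For callers, the visible API change is the pair of thread offline/online
callbacks appended to cds_lfht_new(). A hedged wiring sketch against the QSBR
flavor, in the style of the urcu test programs: the leading and trailing
parameters follow the prototype above, while the middle parameters (compare
function, seed, initial size, flags) are inferred from the struct fields and
may not match the exact header; my_hash, my_compare and the header names are
placeholders of mine.

    #include <string.h>
    #include <urcu-qsbr.h>      /* QSBR flavor primitives (assumed header) */
    #include "rculfhash.h"      /* this file's API (assumed header name) */

    /* Placeholder callbacks, for illustration only. */
    static unsigned long my_hash(void *key, size_t length, unsigned long seed)
    {
            return (unsigned long) key ^ seed;      /* not a real hash */
    }

    static unsigned long my_compare(void *key1, size_t key1_len,
                                    void *key2, size_t key2_len)
    {
            if (key1_len != key2_len)
                    return 1;
            return !!memcmp(key1, key2, key1_len);
    }

    static struct cds_lfht *create_table(void)
    {
            return cds_lfht_new(my_hash, my_compare, 0x42UL,
                            128 /* init_size */, CDS_LFHT_AUTO_RESIZE,
                            call_rcu, synchronize_rcu,
                            rcu_read_lock, rcu_read_unlock,
                            rcu_thread_offline, rcu_thread_online);
    }
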
 
 struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
 {
-       struct rcu_table *t;
        struct cds_lfht_node *node, *next;
        struct _cds_lfht_node *lookup;
-       unsigned long hash, reverse_hash, index, order;
+       unsigned long hash, reverse_hash, index, order, size;
 
        hash = ht->hash_fct(key, key_len, ht->hash_seed);
        reverse_hash = bit_reverse_ulong(hash);
 
-       t = rcu_dereference(ht->t);
-       index = hash & (t->size - 1);
+       size = rcu_dereference(ht->t.size);
+       index = hash & (size - 1);
        order = get_count_order_ulong(index + 1);
-       lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
+       lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
        dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
                   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
        node = (struct cds_lfht_node *) lookup;
@@ -1072,57 +1195,53 @@ struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
 
 void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
 {
-       struct rcu_table *t;
-       unsigned long hash;
+       unsigned long hash, size;
 
        hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
        node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
-       t = rcu_dereference(ht->t);
-       (void) _cds_lfht_add(ht, t, node, 0, 0);
-       ht_count_add(ht, t);
+       size = rcu_dereference(ht->t.size);
+       (void) _cds_lfht_add(ht, size, node, 0, 0);
+       ht_count_add(ht, size);
 }
 
 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
                                        struct cds_lfht_node *node)
 {
-       struct rcu_table *t;
-       unsigned long hash;
+       unsigned long hash, size;
        struct cds_lfht_node *ret;
 
        hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
        node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
-       t = rcu_dereference(ht->t);
-       ret = _cds_lfht_add(ht, t, node, 1, 0);
+       size = rcu_dereference(ht->t.size);
+       ret = _cds_lfht_add(ht, size, node, 1, 0);
        if (ret != node)
-               ht_count_add(ht, t);
+               ht_count_add(ht, size);
        return ret;
 }
 
 int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
 {
-       struct rcu_table *t;
+       unsigned long size;
        int ret;
 
-       t = rcu_dereference(ht->t);
-       ret = _cds_lfht_remove(ht, t, node, 0);
+       size = rcu_dereference(ht->t.size);
+       ret = _cds_lfht_remove(ht, size, node, 0);
        if (!ret)
-               ht_count_remove(ht, t);
+               ht_count_remove(ht, size);
        return ret;
 }
 
 static
 int cds_lfht_delete_dummy(struct cds_lfht *ht)
 {
-       struct rcu_table *t;
        struct cds_lfht_node *node;
        struct _cds_lfht_node *lookup;
-       unsigned long order, i;
+       unsigned long order, i, size;
 
-       t = ht->t;
        /* Check that the table is empty */
-       lookup = &t->tbl[0]->nodes[0];
+       lookup = &ht->t.tbl[0]->nodes[0];
        node = (struct cds_lfht_node *) lookup;
        do {
                node = clear_flag(node)->p.next;
@@ -1130,18 +1249,23 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
                        return -EPERM;
                assert(!is_removed(node));
        } while (clear_flag(node));
+       /*
+        * size accessed without rcu_dereference because hash table is
+        * being destroyed.
+        */
+       size = ht->t.size;
        /* Internal sanity check: all nodes left should be dummy */
-       for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
+       for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
                unsigned long len;
 
                len = !order ? 1 : 1UL << (order - 1);
                for (i = 0; i < len; i++) {
                        dbg_printf("delete order %lu i %lu hash %lu\n",
                                order, i,
-                               bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
-                       assert(is_dummy(t->tbl[order]->nodes[i].next));
+                               bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
+                       assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
                }
-               poison_free(t->tbl[order]);
+               poison_free(ht->t.tbl[order]);
        }
        return 0;
 }
@@ -1161,7 +1285,6 @@ int cds_lfht_destroy(struct cds_lfht *ht)
        ret = cds_lfht_delete_dummy(ht);
        if (ret)
                return ret;
-       poison_free(ht->t);
        free_per_cpu_items_count(ht->percpu_count);
        poison_free(ht);
        return ret;
@@ -1171,7 +1294,6 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
                unsigned long *count,
                unsigned long *removed)
 {
-       struct rcu_table *t;
        struct cds_lfht_node *node, *next;
        struct _cds_lfht_node *lookup;
        unsigned long nr_dummy = 0;
@@ -1179,9 +1301,8 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
        *count = 0;
        *removed = 0;
 
-       t = rcu_dereference(ht->t);
        /* Count non-dummy nodes in the table */
-       lookup = &t->tbl[0]->nodes[0];
+       lookup = &ht->t.tbl[0]->nodes[0];
        node = (struct cds_lfht_node *) lookup;
        do {
                next = rcu_dereference(node->p.next);
@@ -1199,64 +1320,35 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
 
 /* called with resize mutex held */
 static
-void _do_cds_lfht_grow(struct cds_lfht *ht, struct rcu_table *old_t,
+void _do_cds_lfht_grow(struct cds_lfht *ht,
                unsigned long old_size, unsigned long new_size)
 {
        unsigned long old_order, new_order;
-       struct rcu_table *new_t;
 
        old_order = get_count_order_ulong(old_size) + 1;
        new_order = get_count_order_ulong(new_size) + 1;
        printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
               old_size, old_order, new_size, new_order);
-       new_t = malloc(sizeof(struct cds_lfht)
-                       + (new_order * sizeof(struct rcu_level *)));
        assert(new_size > old_size);
-       memcpy(&new_t->tbl, &old_t->tbl,
-              old_order * sizeof(struct rcu_level *));
-       init_table(ht, new_t, old_order, new_order - old_order);
-       /* Changing table and size atomically wrt lookups */
-       rcu_assign_pointer(ht->t, new_t);
-       ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
+       init_table(ht, old_order, new_order - old_order);
 }
 
 /* called with resize mutex held */
 static
-void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
+void _do_cds_lfht_shrink(struct cds_lfht *ht,
                unsigned long old_size, unsigned long new_size)
 {
        unsigned long old_order, new_order;
-       struct rcu_table *new_t;
 
        new_size = max(new_size, MIN_TABLE_SIZE);
        old_order = get_count_order_ulong(old_size) + 1;
        new_order = get_count_order_ulong(new_size) + 1;
        printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
               old_size, old_order, new_size, new_order);
-       new_t = malloc(sizeof(struct cds_lfht)
-                       + (new_order * sizeof(struct rcu_level *)));
        assert(new_size < old_size);
-       memcpy(&new_t->tbl, &old_t->tbl,
-              new_order * sizeof(struct rcu_level *));
-       new_t->size = !new_order ? 1 : (1UL << (new_order - 1));
-       assert(new_t->size == new_size);
-       new_t->resize_target = new_t->size;
-       new_t->resize_initiated = 0;
-
-       /* Changing table and size atomically wrt lookups */
-       rcu_assign_pointer(ht->t, new_t);
-
-       /*
-        * We need to wait for all add operations to reach Q.S. (and
-        * thus use the new table for lookups) before we can start
-        * releasing the old dummy nodes. Otherwise their lookup will
-        * return a logically removed node as insert position.
-        */
-       ht->cds_lfht_synchronize_rcu();
 
-       /* Unlink and remove all now-unused dummy node pointers. */
-       fini_table(ht, old_t, new_order, old_order - new_order);
-       ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
+       /* Remove and unlink all now-unused dummy nodes. */
+       fini_table(ht, new_order, old_order - new_order);
 }
 
 
@@ -1265,44 +1357,49 @@ static
 void _do_cds_lfht_resize(struct cds_lfht *ht)
 {
        unsigned long new_size, old_size;
-       struct rcu_table *old_t;
-
-       old_t = ht->t;
-       old_size = old_t->size;
-       new_size = CMM_LOAD_SHARED(old_t->resize_target);
-       if (old_size < new_size)
-               _do_cds_lfht_grow(ht, old_t, old_size, new_size);
-       else if (old_size > new_size)
-               _do_cds_lfht_shrink(ht, old_t, old_size, new_size);
-       else
-               CMM_STORE_SHARED(old_t->resize_initiated, 0);
+
+       /*
+        * Resize table, re-do if the target size has changed under us.
+        */
+       do {
+               ht->t.resize_initiated = 1;
+               old_size = ht->t.size;
+               new_size = CMM_LOAD_SHARED(ht->t.resize_target);
+               if (old_size < new_size)
+                       _do_cds_lfht_grow(ht, old_size, new_size);
+               else if (old_size > new_size)
+                       _do_cds_lfht_shrink(ht, old_size, new_size);
+               ht->t.resize_initiated = 0;
+               /* write resize_initiated before read resize_target */
+               cmm_smp_mb();
+       } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
 }
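
The retry loop above pairs with the barrier added in cds_lfht_resize_lazy()
further down: the worker writes resize_initiated before re-reading
resize_target, while requesters write resize_target before reading
resize_initiated, so a target update arriving mid-resize is never lost.
Below is a self-contained toy model of that handshake using C11 atomics
(sequentially consistent operations stand in for the explicit cmm_smp_mb()
pairing; the names and the single-threaded driver are mine):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong size, resize_target;
    static atomic_int resize_initiated;

    /* Requester side, as in cds_lfht_resize_lazy(). */
    static void request_resize(unsigned long target)
    {
            atomic_store(&resize_target, target);
            /* store target before reading initiated (cmm_smp_mb()) */
            if (!atomic_load(&resize_initiated))
                    printf("would queue do_resize_cb()\n");
    }

    /* Worker side, as in _do_cds_lfht_resize(); the real code grows or
     * shrinks level by level rather than in one store. */
    static void do_resize(void)
    {
            do {
                    atomic_store(&resize_initiated, 1);
                    atomic_store(&size, atomic_load(&resize_target));
                    atomic_store(&resize_initiated, 0);
                    /* store initiated before re-reading target (cmm_smp_mb()) */
            } while (atomic_load(&size) != atomic_load(&resize_target));
    }

    int main(void)
    {
            request_resize(256);
            do_resize();
            request_resize(1024);   /* target changed: worker re-checks */
            do_resize();
            printf("final size %lu\n", atomic_load(&size));
            return 0;
    }
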
 
 static
-unsigned long resize_target_update(struct rcu_table *t,
+unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
                                   int growth_order)
 {
-       return _uatomic_max(&t->resize_target,
-                           t->size << growth_order);
+       return _uatomic_max(&ht->t.resize_target,
+                           size << growth_order);
 }
 
 static
-void resize_target_update_count(struct rcu_table *t,
+void resize_target_update_count(struct cds_lfht *ht,
                                unsigned long count)
 {
        count = max(count, MIN_TABLE_SIZE);
-       uatomic_set(&t->resize_target, count);
+       uatomic_set(&ht->t.resize_target, count);
 }
 
 void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
 {
-       struct rcu_table *t = rcu_dereference(ht->t);
-
-       resize_target_update_count(t, new_size);
-       CMM_STORE_SHARED(t->resize_initiated, 1);
+       resize_target_update_count(ht, new_size);
+       CMM_STORE_SHARED(ht->t.resize_initiated, 1);
+       ht->cds_lfht_rcu_thread_offline();
        pthread_mutex_lock(&ht->resize_mutex);
        _do_cds_lfht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
+       ht->cds_lfht_rcu_thread_online();
 }
 
 static
@@ -1312,49 +1409,55 @@ void do_resize_cb(struct rcu_head *head)
                caa_container_of(head, struct rcu_resize_work, head);
        struct cds_lfht *ht = work->ht;
 
+       ht->cds_lfht_rcu_thread_offline();
        pthread_mutex_lock(&ht->resize_mutex);
        _do_cds_lfht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
+       ht->cds_lfht_rcu_thread_online();
        poison_free(work);
        cmm_smp_mb();   /* finish resize before decrement */
        uatomic_dec(&ht->in_progress_resize);
 }
 
 static
-void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
+void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
 {
        struct rcu_resize_work *work;
        unsigned long target_size;
 
-       target_size = resize_target_update(t, growth);
-       if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+       target_size = resize_target_update(ht, size, growth);
+       /* Store resize_target before read resize_initiated */
+       cmm_smp_mb();
+       if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
                uatomic_inc(&ht->in_progress_resize);
                cmm_smp_mb();   /* increment resize count before calling it */
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
-               CMM_STORE_SHARED(t->resize_initiated, 1);
+               CMM_STORE_SHARED(ht->t.resize_initiated, 1);
        }
 }
 
 #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
 
 static
-void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
                                unsigned long count)
 {
        struct rcu_resize_work *work;
 
        if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
                return;
-       resize_target_update_count(t, count);
-       if (!CMM_LOAD_SHARED(t->resize_initiated)) {
+       resize_target_update_count(ht, count);
+       /* Store resize_target before read resize_initiated */
+       cmm_smp_mb();
+       if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
                uatomic_inc(&ht->in_progress_resize);
                cmm_smp_mb();   /* increment resize count before calling it */
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
-               CMM_STORE_SHARED(t->resize_initiated, 1);
+               CMM_STORE_SHARED(ht->t.resize_initiated, 1);
        }
 }
 