rculfhash: remove now unneeded gc flag (combine with removed)
[urcu.git] / rculfhash.c
index 5a6eb87ad35fa92deed76bd7ff90abc354363ba8..1487980c832c115154937cda4b185024636f9e44 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * rculfhash.c
  *
- * Userspace RCU library - Lock-Free Expandable RCU Hash Table
+ * Userspace RCU library - Lock-Free Resizable RCU Hash Table
  *
  * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
@@ -29,7 +29,7 @@
  *   symposium on Parallel algorithms and architectures, ACM Press,
  *   (2002), 73-82.
  *
- * Some specificities of this Lock-Free Expandable RCU Hash Table
+ * Some specificities of this Lock-Free Resizable RCU Hash Table
  * implementation:
  *
  * - RCU read-side critical section allows readers to perform hash
  * - An index of dummy nodes is kept. These dummy nodes are the hash
  *   table "buckets", and they are also chained together in the
  *   split-ordered list, which allows recursive expansion.
- * - The resize operation only allows expanding the hash table.
- *   It is triggered either through an API call or automatically by
- *   detecting long chains in the add operation.
+ * - The resize operation for small tables only allows expanding the hash
+ *   table. It is triggered automatically by detecting long chains in the
+ *   add operation.
+ * - The resize operation for larger tables (also available through the
+ *   API) allows both expanding and shrinking the hash table.
+ * - Per-CPU split-counters are used to keep track of the number of
+ *   nodes within the hash table for automatic resize triggering.
  * - Resize operation initiated by long chain detection is executed by a
  *   call_rcu thread, which keeps lock-freedom of add and remove.
  * - Resize operations are protected by a mutex.
  * - The per-order dummy node tables contain a compact version of the
  *   hash table nodes. These tables are invariant after they are
  *   populated into the hash table.
+ *
+ * A bit of ASCII art explanation:
+ *
+ * The order index is off by one compared to the actual power of 2,
+ * because we use index 0 to deal with the 0 special-case.
+ *
+ * This shows the nodes for a small table ordered by reversed bits:
+ *
+ *    bits   reverse
+ * 0  000        000
+ * 4  100        001
+ * 2  010        010
+ * 6  110        011
+ * 1  001        100
+ * 5  101        101
+ * 3  011        110
+ * 7  111        111
+ *
+ * This shows the nodes in order of non-reversed bits, linked by
+ * reversed-bit order.
+ *
+ * order              bits       reverse
+ * 0               0  000        000
+ *                 |
+ * 1               |  1  001        100       <-    <-
+ *                 |  |                        |     |
+ * 2               |  |  2  010        010     |     |
+ *                 |  |  |  3  011        110  | <-  |
+ *                 |  |  |  |                  |  |  |
+ * 3               -> |  |  |  4  100        001  |  |
+ *                    -> |  |     5  101        101  |
+ *                       -> |        6  110        011
+ *                          ->          7  111        111
  */
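
To make the split-ordering concrete, here is a standalone sketch (not part of the patch) that reproduces the reversed-bit bucket ordering from the first table above for the 8-bucket case:

    #include <stdio.h>

    /* Reverse the low 3 bits of v, enough for the 8-bucket example above. */
    static unsigned int reverse3(unsigned int v)
    {
            return ((v & 1) << 2) | (v & 2) | ((v >> 2) & 1);
    }

    int main(void)
    {
            unsigned int r, v;

            /* Walk reversed-bit values in increasing order; print each node. */
            for (r = 0; r < 8; r++)
                    for (v = 0; v < 8; v++)
                            if (reverse3(v) == r)
                                    printf("%u  bits %u%u%u  reverse %u%u%u\n",
                                           v, v >> 2, (v >> 1) & 1, v & 1,
                                           r >> 2, (r >> 1) & 1, r & 1);
            return 0;
    }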
 
 #define _LGPL_SOURCE
 #define dbg_printf(fmt, args...)
 #endif
 
-#define CHAIN_LEN_TARGET               4
-#define CHAIN_LEN_RESIZE_THRESHOLD     8
-
-/* Commit counter changes to global counter each 1024 steps */
+/*
+ * Per-CPU split-counters lazily update the global counter every 1024
+ * additions/removals. They automatically keep track of when a resize
+ * of the table is required. The bucket chain length is used as the
+ * expansion indicator for small tables and for machines lacking
+ * per-cpu data support.
+ */
 #define COUNT_COMMIT_ORDER             10
+#define CHAIN_LEN_TARGET               1
+#define CHAIN_LEN_RESIZE_THRESHOLD     3
+
+/*
+ * Define the minimum table size.
+ */
+#define MIN_TABLE_SIZE                 1
+
+#if (CAA_BITS_PER_LONG == 32)
+#define MAX_TABLE_ORDER                        32
+#else
+#define MAX_TABLE_ORDER                        64
+#endif
+
+/*
+ * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
+ */
+#define MIN_PARTITION_PER_THREAD_ORDER 12
+#define MIN_PARTITION_PER_THREAD       (1UL << MIN_PARTITION_PER_THREAD_ORDER)
+
+#ifndef min
+#define min(a, b)      ((a) < (b) ? (a) : (b))
+#endif
 
 #ifndef max
 #define max(a, b)      ((a) > (b) ? (a) : (b))
 
 /*
  * The removed flag needs to be updated atomically with the pointer.
+ * It indicates that no new node may be attached to the node scheduled
+ * for removal, and that node garbage collection must be performed.
  * The dummy flag does not require to be updated atomically with the
  * pointer, but it is added as a pointer low bit flag to save space.
  */
 #define DUMMY_FLAG             (1UL << 1)
 #define FLAGS_MASK             ((1UL << 2) - 1)
 
+/* Value of the end pointer. Should not interact with flags. */
+#define END_VALUE              NULL
+
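
For illustration, the low-bit pointer tagging in isolation. A minimal sketch: REMOVED_FLAG is defined in a part of the file not shown in this diff, so its value (1UL << 0) is an assumption, consistent with FLAGS_MASK covering the two low bits:

    #include <assert.h>
    #include <stdio.h>

    #define REMOVED_FLAG    (1UL << 0)      /* assumed; not visible in this hunk */
    #define DUMMY_FLAG      (1UL << 1)
    #define FLAGS_MASK      ((1UL << 2) - 1)

    struct node { struct node *next; };

    static struct node *flag_removed(struct node *p)
    {
            return (struct node *) ((unsigned long) p | REMOVED_FLAG);
    }

    static struct node *clear_flag(struct node *p)
    {
            return (struct node *) ((unsigned long) p & ~FLAGS_MASK);
    }

    int main(void)
    {
            static struct node n;   /* node addresses are at least 4-byte
                                     * aligned, leaving the low bits free */
            struct node *p = flag_removed(&n);

            assert((unsigned long) p & REMOVED_FLAG);
            assert(clear_flag(p) == &n);
            printf("tagged %p unwraps to %p\n", (void *) p, (void *) &n);
            return 0;
    }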
 struct ht_items_count {
-       unsigned long add, remove;
+       unsigned long add, del;
 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 
+struct rcu_level {
+       struct rcu_head head;
+       struct _cds_lfht_node nodes[0];
+};
+
 struct rcu_table {
-       unsigned long size;     /* always a power of 2 */
+       unsigned long size;     /* always a power of 2, shared (RCU) */
        unsigned long resize_target;
        int resize_initiated;
-       struct rcu_head head;
-       struct _cds_lfht_node *tbl[0];
+       struct rcu_level *tbl[MAX_TABLE_ORDER];
 };
 
 struct cds_lfht {
-       struct rcu_table *t;            /* shared */
+       struct rcu_table t;
        cds_lfht_hash_fct hash_fct;
        cds_lfht_compare_fct compare_fct;
        unsigned long hash_seed;
+       int flags;
+       /*
+        * We need to put the work threads offline (QSBR) when taking this
+        * mutex, because we use synchronize_rcu within this mutex critical
+        * section, which waits on read-side critical sections, and could
+        * therefore cause a grace-period deadlock if we held off RCU
+        * grace-period completion.
+        */
        pthread_mutex_t resize_mutex;   /* resize mutex: add/del mutex */
        unsigned int in_progress_resize, in_progress_destroy;
        void (*cds_lfht_call_rcu)(struct rcu_head *head,
                      void (*func)(struct rcu_head *head));
+       void (*cds_lfht_synchronize_rcu)(void);
+       void (*cds_lfht_rcu_read_lock)(void);
+       void (*cds_lfht_rcu_read_unlock)(void);
+       void (*cds_lfht_rcu_thread_offline)(void);
+       void (*cds_lfht_rcu_thread_online)(void);
+       void (*cds_lfht_rcu_register_thread)(void);
+       void (*cds_lfht_rcu_unregister_thread)(void);
+       pthread_attr_t *resize_attr;    /* Resize threads attributes */
        unsigned long count;            /* global approximate item count */
        struct ht_items_count *percpu_count;    /* per-cpu item count */
 };
@@ -168,6 +255,26 @@ struct rcu_resize_work {
        struct cds_lfht *ht;
 };
 
+struct partition_resize_work {
+       struct rcu_head head;
+       struct cds_lfht *ht;
+       unsigned long i, start, len;
+       void (*fct)(struct cds_lfht *ht, unsigned long i,
+                   unsigned long start, unsigned long len);
+};
+
+enum add_mode {
+       ADD_DEFAULT = 0,
+       ADD_UNIQUE = 1,
+       ADD_REPLACE = 2,
+};
+
+static
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
+                               unsigned long size,
+                               struct cds_lfht_node *node,
+                               enum add_mode mode, int dummy);
+
 /*
  * Algorithm to reverse bits in a word by lookup table, extended to
  * 64-bit words.
@@ -360,12 +467,18 @@ int get_count_order_ulong(unsigned long x)
        return order;
 }
 
-static
-void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
+#ifdef POISON_FREE
+#define poison_free(ptr)                               \
+       do {                                            \
+               memset(ptr, 0x42, sizeof(*(ptr)));      \
+               free(ptr);                              \
+       } while (0)
+#else
+#define poison_free(ptr)       free(ptr)
+#endif
 
 static
-void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
-                               unsigned long count);
+void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);
 
 /*
  * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
@@ -373,9 +486,12 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
  * In the unfortunate event the number of CPUs reported would be
  * inaccurate, we use modulo arithmetic on the number of CPUs we got.
  */
-
 #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
 
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
+                               unsigned long count);
+
 static long nr_cpus_mask = -1;
 
 static
@@ -411,7 +527,7 @@ struct ht_items_count *alloc_per_cpu_items_count(void)
 static
 void free_per_cpu_items_count(struct ht_items_count *count)
 {
-       free(count);
+       poison_free(count);
 }
 
 static
@@ -428,7 +544,7 @@ int ht_get_cpu(void)
 }
 
 static
-void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+void ht_count_add(struct cds_lfht *ht, unsigned long size)
 {
        unsigned long percpu_count;
        int cpu;
@@ -447,14 +563,17 @@ void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
                                           1UL << COUNT_COMMIT_ORDER);
                /* If power of 2 */
                if (!(count & (count - 1))) {
-                       dbg_printf("add global %lu\n", count);
-                       cds_lfht_resize_lazy_count(ht, t, count);
+                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
+                               return;
+                       dbg_printf("add set global %lu\n", count);
+                       cds_lfht_resize_lazy_count(ht, size,
+                               count >> (CHAIN_LEN_TARGET - 1));
                }
        }
 }
 
 static
-void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+void ht_count_del(struct cds_lfht *ht, unsigned long size)
 {
        unsigned long percpu_count;
        int cpu;
@@ -464,17 +583,20 @@ void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
        cpu = ht_get_cpu();
        if (unlikely(cpu < 0))
                return;
-       percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
+       percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, -1);
        if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
                unsigned long count;
 
-               dbg_printf("remove percpu %lu\n", percpu_count);
+               dbg_printf("del percpu %lu\n", percpu_count);
                count = uatomic_add_return(&ht->count,
                                           -(1UL << COUNT_COMMIT_ORDER));
                /* If power of 2 */
                if (!(count & (count - 1))) {
-                       dbg_printf("remove global %lu\n", count);
-                       cds_lfht_resize_lazy_count(ht, t, count);
+                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
+                               return;
+                       dbg_printf("del set global %lu\n", count);
+                       cds_lfht_resize_lazy_count(ht, size,
+                               count >> (CHAIN_LEN_TARGET - 1));
                }
        }
 }
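
The commit paths above rely on two bit tricks: masking with (1UL << COUNT_COMMIT_ORDER) - 1 fires once every 1024 local updates, and !(count & (count - 1)) is true only when the freshly committed global count is a power of two, which rate-limits resize checks as the table grows. A standalone sketch of the same pattern, using plain counters in place of the uatomic operations:

    #include <stdio.h>

    #define COUNT_COMMIT_ORDER      10

    int main(void)
    {
            unsigned long percpu = 0, global = 0, i;

            for (i = 0; i < 5000; i++) {
                    percpu++;
                    /* Lazy commit: propagate every 1024 local updates. */
                    if (!(percpu & ((1UL << COUNT_COMMIT_ORDER) - 1))) {
                            global += 1UL << COUNT_COMMIT_ORDER;
                            /* Power-of-2 test: consider a resize only then. */
                            if (!(global & (global - 1)))
                                    printf("global %lu: resize check\n",
                                           global);
                    }
            }
            return 0;
    }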
@@ -495,12 +617,12 @@ void free_per_cpu_items_count(struct ht_items_count *count)
 }
 
 static
-void ht_count_add(struct cds_lfht *ht)
+void ht_count_add(struct cds_lfht *ht, unsigned long size)
 {
 }
 
 static
-void ht_count_remove(struct cds_lfht *ht)
+void ht_count_del(struct cds_lfht *ht, unsigned long size)
 {
 }
 
@@ -508,15 +630,24 @@ void ht_count_remove(struct cds_lfht *ht)
 
 
 static
-void check_resize(struct cds_lfht *ht, struct rcu_table *t,
-                 uint32_t chain_len)
+void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
 {
-       return;         //TEST
+       unsigned long count;
+
+       if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+               return;
+       count = uatomic_read(&ht->count);
+       /*
+        * Use the bucket-local chain length to drive expansion of small
+        * tables and in environments lacking per-cpu data support.
+        */
+       if (count >= (1UL << COUNT_COMMIT_ORDER))
+               return;
        if (chain_len > 100)
                dbg_printf("WARNING: large chain length: %u.\n",
                           chain_len);
        if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
-               cds_lfht_resize_lazy(ht, t,
+               cds_lfht_resize_lazy(ht, size,
                        get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
 }
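
As a worked example of the trigger above: with CHAIN_LEN_TARGET at 1 and CHAIN_LEN_RESIZE_THRESHOLD at 3, walking a chain of 3 nodes calls cds_lfht_resize_lazy with growth = get_count_order_u32(3) = 2, i.e. a request to multiply the table size by 4 so the offending chain shrinks back toward the one-node target.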
 
@@ -549,7 +680,19 @@ struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
 {
        return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
 }
+
+static
+struct cds_lfht_node *get_end(void)
+{
+       return (struct cds_lfht_node *) END_VALUE;
+}
+
+static
+int is_end(struct cds_lfht_node *node)
+{
+       return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
+}
+
 static
 unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
 {
@@ -564,6 +707,14 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
        return v;
 }
 
+static
+void cds_lfht_free_level(struct rcu_head *head)
+{
+       struct rcu_level *l =
+               caa_container_of(head, struct rcu_level, head);
+       poison_free(l);
+}
+
 /*
  * Remove all logically deleted nodes from a bucket up to a certain node key.
  */
@@ -572,13 +723,24 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 {
        struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
 
+       assert(!is_dummy(dummy));
+       assert(!is_removed(dummy));
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
        for (;;) {
                iter_prev = dummy;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+               /*
+                * We should never be called with dummy (start of chain)
+                * and logically removed node (end of path compression
+                * marker) being the actual same node. This would be a
+                * bug in the algorithm implementation.
+                */
+               assert(dummy != node);
                for (;;) {
-                       if (unlikely(!clear_flag(iter)))
+                       if (unlikely(is_end(iter)))
                                return;
                        if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
                                return;
@@ -593,22 +755,29 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
                        new_next = flag_dummy(clear_flag(next));
                else
                        new_next = clear_flag(next);
+               if (is_removed(iter))
+                       new_next = flag_removed(new_next);
                (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
        }
+       return;
 }
 
 static
-struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
-                               struct cds_lfht_node *node, int unique, int dummy)
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
+                               unsigned long size,
+                               struct cds_lfht_node *node,
+                               enum add_mode mode, int dummy)
 {
        struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
-                       *dummy_node;
+                       *dummy_node, *return_node;
        struct _cds_lfht_node *lookup;
        unsigned long hash, index, order;
 
-       if (!t->size) {
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
+       if (!size) {
                assert(dummy);
-               node->p.next = flag_dummy(NULL);
+               node->p.next = flag_dummy(get_end());
                return node;    /* Initial first add (head) */
        }
        hash = bit_reverse_ulong(node->p.reverse_hash);
@@ -619,37 +788,43 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
                 * iter_prev points to the non-removed node prior to the
                 * insert location.
                 */
-               index = hash & (t->size - 1);
+               index = hash & (size - 1);
                order = get_count_order_ulong(index + 1);
-               lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+               lookup = &ht->t.tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
                iter_prev = (struct cds_lfht_node *) lookup;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
                for (;;) {
-                       if (unlikely(!clear_flag(iter)))
+                       if (unlikely(is_end(iter)))
                                goto insert;
                        if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
                                goto insert;
                        next = rcu_dereference(clear_flag(iter)->p.next);
                        if (unlikely(is_removed(next)))
                                goto gc_node;
-                       if (unique
+                       if ((mode == ADD_UNIQUE || mode == ADD_REPLACE)
                            && !is_dummy(next)
                            && !ht->compare_fct(node->key, node->key_len,
                                                clear_flag(iter)->key,
-                                               clear_flag(iter)->key_len))
-                               return clear_flag(iter);
+                                               clear_flag(iter)->key_len)) {
+                               if (mode == ADD_UNIQUE)
+                                       return clear_flag(iter);
+                               else /* mode == ADD_REPLACE */
+                                       goto replace;
+                       }
                        /* Only account for identical reverse hash once */
                        if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
                            && !is_dummy(next))
-                               check_resize(ht, t, ++chain_len);
+                               check_resize(ht, size, ++chain_len);
                        iter_prev = clear_flag(iter);
                        iter = next;
                }
+
        insert:
                assert(node != clear_flag(iter));
                assert(!is_removed(iter_prev));
+               assert(!is_removed(iter));
                assert(iter_prev != node);
                if (!dummy)
                        node->p.next = clear_flag(iter);
@@ -660,10 +835,49 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
                else
                        new_node = node;
                if (uatomic_cmpxchg(&iter_prev->p.next, iter,
-                                   new_node) != iter)
+                                   new_node) != iter) {
                        continue;       /* retry */
+               } else {
+                       if (mode == ADD_REPLACE)
+                               return_node = NULL;
+                       else    /* ADD_DEFAULT and ADD_UNIQUE */
+                               return_node = node;
+                       goto gc_end;
+               }
+
+       replace:
+               /* Insert after node to be replaced */
+               iter_prev = clear_flag(iter);
+               iter = next;
+               assert(node != clear_flag(iter));
+               assert(!is_removed(iter_prev));
+               assert(!is_removed(iter));
+               assert(iter_prev != node);
+               assert(!dummy);
+               node->p.next = clear_flag(iter);
+               if (is_dummy(iter))
+                       new_node = flag_dummy(node);
                else
+                       new_node = node;
+               /*
+                * Here is the whole trick for lock-free replace: we add
+                * the replacement node _after_ the node we want to
+                * replace by atomically setting its next pointer at the
+                * same time we set its removal flag. Given that
+                * the lookups/get next use an iterator aware of the
+                * next pointer, they will either skip the old node due
+                * to the removal flag and see the new node, or use
+                * the old node, but will not see the new one.
+                */
+               new_node = flag_removed(new_node);
+               if (uatomic_cmpxchg(&iter_prev->p.next,
+                             iter, new_node) != iter) {
+                       continue;       /* retry */
+               } else {
+                       return_node = iter_prev;
                        goto gc_end;
+               }
+
        gc_node:
                assert(!is_removed(iter));
                if (is_dummy(iter))
@@ -675,17 +889,18 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
        }
 gc_end:
        /* Garbage collect logically removed nodes in the bucket */
-       index = hash & (t->size - 1);
+       index = hash & (size - 1);
        order = get_count_order_ulong(index + 1);
-       lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+       lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
        dummy_node = (struct cds_lfht_node *) lookup;
        _cds_lfht_gc_bucket(dummy_node, node);
-       return node;
+       return return_node;
 }
 
 static
-int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
-               struct cds_lfht_node *node)
+int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
+               struct cds_lfht_node *node,
+               int dummy_removal)
 {
        struct cds_lfht_node *dummy, *next, *old;
        struct _cds_lfht_node *lookup;
@@ -693,14 +908,21 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
        unsigned long hash, index, order;
 
        /* logically delete the node */
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
        old = rcu_dereference(node->p.next);
        do {
+               struct cds_lfht_node *new_next;
+
                next = old;
                if (unlikely(is_removed(next)))
                        goto end;
-               assert(!is_dummy(next));
-               old = uatomic_cmpxchg(&node->p.next, next,
-                                     flag_removed(next));
+               if (dummy_removal)
+                       assert(is_dummy(next));
+               else
+                       assert(!is_dummy(next));
+               new_next = flag_removed(next);
+               old = uatomic_cmpxchg(&node->p.next, next, new_next);
        } while (old != next);
 
        /* We performed the (logical) deletion. */
@@ -712,9 +934,10 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
         * if found.
         */
        hash = bit_reverse_ulong(node->p.reverse_hash);
-       index = hash & (t->size - 1);
+       assert(size > 0);
+       index = hash & (size - 1);
        order = get_count_order_ulong(index + 1);
-       lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+       lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
        dummy = (struct cds_lfht_node *) lookup;
        _cds_lfht_gc_bucket(dummy, node);
 end:
@@ -730,7 +953,107 @@ end:
 }
 
 static
-void init_table(struct cds_lfht *ht, struct rcu_table *t,
+void *partition_resize_thread(void *arg)
+{
+       struct partition_resize_work *work = arg;
+
+       work->ht->cds_lfht_rcu_register_thread();
+       work->fct(work->ht, work->i, work->start, work->len);
+       work->ht->cds_lfht_rcu_unregister_thread();
+       return NULL;
+}
+
+static
+void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
+               unsigned long len,
+               void (*fct)(struct cds_lfht *ht, unsigned long i,
+                       unsigned long start, unsigned long len))
+{
+       unsigned long partition_len;
+       struct partition_resize_work *work;
+       int thread, ret;
+       unsigned long nr_threads;
+       pthread_t *thread_id;
+
+       /*
+        * Note: nr_cpus_mask + 1 is always a power of 2.
+        * We spawn just the number of threads we need to satisfy the minimum
+        * partition size, up to the number of CPUs in the system.
+        */
+       nr_threads = min(nr_cpus_mask + 1,
+                        len >> MIN_PARTITION_PER_THREAD_ORDER);
+       partition_len = len >> get_count_order_ulong(nr_threads);
+       work = calloc(nr_threads, sizeof(*work));
+       thread_id = calloc(nr_threads, sizeof(*thread_id));
+       assert(work);
+       for (thread = 0; thread < nr_threads; thread++) {
+               work[thread].ht = ht;
+               work[thread].i = i;
+               work[thread].len = partition_len;
+               work[thread].start = thread * partition_len;
+               work[thread].fct = fct;
+               ret = pthread_create(&thread_id[thread], ht->resize_attr,
+                       partition_resize_thread, &work[thread]);
+               assert(!ret);
+       }
+       for (thread = 0; thread < nr_threads; thread++) {
+               ret = pthread_join(thread_id[thread], NULL);
+               assert(!ret);
+       }
+       free(work);
+       free(thread_id);
+}
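
As a worked example of the partitioning arithmetic: populating an order of len = 2^22 dummy nodes on a machine with 8 CPUs (nr_cpus_mask + 1 == 8) gives nr_threads = min(8, 2^22 >> 12) = 8, so each worker handles a partition of 2^19 nodes. Lengths below 2 * MIN_PARTITION_PER_THREAD never reach this helper; the callers below handle them in the current thread instead.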
+
+/*
+ * Holding RCU read lock to protect _cds_lfht_add against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ *
+ * When we reach a certain length, we can split this population phase over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should therefore keep the expand from lagging behind too many
+ * concurrent insertion threads, relying on the scheduler's ability to
+ * schedule dummy node population fairly with insertions.
+ */
+static
+void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
+                                  unsigned long start, unsigned long len)
+{
+       unsigned long j;
+
+       ht->cds_lfht_rcu_read_lock();
+       for (j = start; j < start + len; j++) {
+               struct cds_lfht_node *new_node =
+                       (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+
+               dbg_printf("init populate: i %lu j %lu hash %lu\n",
+                          i, j, !i ? 0 : (1UL << (i - 1)) + j);
+               new_node->p.reverse_hash =
+                       bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+               (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
+                               new_node, ADD_DEFAULT, 1);
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
+       }
+       ht->cds_lfht_rcu_read_unlock();
+}
+
+static
+void init_table_populate(struct cds_lfht *ht, unsigned long i,
+                        unsigned long len)
+{
+       assert(nr_cpus_mask != -1);
+       if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
+               ht->cds_lfht_rcu_thread_online();
+               init_table_populate_partition(ht, i, 0, len);
+               ht->cds_lfht_rcu_thread_offline();
+               return;
+       }
+       partition_resize_helper(ht, i, len, init_table_populate_partition);
+}
+
+static
+void init_table(struct cds_lfht *ht,
                unsigned long first_order, unsigned long len_order)
 {
        unsigned long i, end_order;
@@ -738,40 +1061,162 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t,
        dbg_printf("init table: first_order %lu end_order %lu\n",
                   first_order, first_order + len_order);
        end_order = first_order + len_order;
-       t->size = !first_order ? 0 : (1UL << (first_order - 1));
        for (i = first_order; i < end_order; i++) {
-               unsigned long j, len;
+               unsigned long len;
 
                len = !i ? 1 : 1UL << (i - 1);
                dbg_printf("init order %lu len: %lu\n", i, len);
-               t->tbl[i] = calloc(len, sizeof(struct _cds_lfht_node));
-               for (j = 0; j < len; j++) {
-                       dbg_printf("init entry: i %lu j %lu hash %lu\n",
-                                  i, j, !i ? 0 : (1UL << (i - 1)) + j);
-                       struct cds_lfht_node *new_node =
-                               (struct cds_lfht_node *) &t->tbl[i][j];
-                       new_node->p.reverse_hash =
-                               bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
-                       (void) _cds_lfht_add(ht, t, new_node, 0, 1);
-                       if (CMM_LOAD_SHARED(ht->in_progress_destroy))
-                               break;
-               }
-               /* Update table size */
-               t->size = !i ? 1 : (1UL << i);
-               dbg_printf("init new size: %lu\n", t->size);
+
+               /* Stop expand if the resize target changes under us */
+               if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
+                       break;
+
+               ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
+                               + (len * sizeof(struct _cds_lfht_node)));
+               assert(ht->t.tbl[i]);
+
+               /*
+                * Set all dummy nodes reverse hash values for a level and
+                * link all dummy nodes into the table.
+                */
+               init_table_populate(ht, i, len);
+
+               /*
+                * Update table size.
+                */
+               cmm_smp_wmb();  /* populate data before RCU size */
+               CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));
+
+               dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
+       }
+}
+
+/*
+ * Holding RCU read lock to protect _cds_lfht_del against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ * For a single level, we logically remove and garbage collect each node.
+ *
+ * As a design choice, we perform logical removal and garbage collection on a
+ * node-per-node basis to simplify this algorithm. We also assume keeping good
+ * cache locality of the operation would overweight possible performance gain
+ * that could be achieved by batching garbage collection for multiple levels.
+ * However, this would have to be justified by benchmarks.
+ *
+ * Concurrent removal and add operations are helping us perform garbage
+ * collection of logically removed nodes. We guarantee that all logically
+ * removed nodes have been garbage-collected (unlinked) before call_rcu is
+ * invoked to free a whole level of dummy nodes (after a grace period).
+ *
+ * Logical removal and garbage collection can therefore be done in batch or on a
+ * node-per-node basis, as long as the guarantee above holds.
+ *
+ * When we reach a certain length, we can split this removal over many worker
+ * threads, based on the number of CPUs available in the system. This should
+ * keep the resize process from lagging behind too many concurrent
+ * updater threads actively inserting into the hash table.
+ */
+static
+void remove_table_partition(struct cds_lfht *ht, unsigned long i,
+                           unsigned long start, unsigned long len)
+{
+       unsigned long j;
+
+       ht->cds_lfht_rcu_read_lock();
+       for (j = start; j < start + len; j++) {
+               struct cds_lfht_node *fini_node =
+                       (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+
+               dbg_printf("remove entry: i %lu j %lu hash %lu\n",
+                          i, j, !i ? 0 : (1UL << (i - 1)) + j);
+               fini_node->p.reverse_hash =
+                       bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+               (void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
+                               fini_node, 1);
                if (CMM_LOAD_SHARED(ht->in_progress_destroy))
                        break;
        }
-       t->resize_target = t->size;
-       t->resize_initiated = 0;
+       ht->cds_lfht_rcu_read_unlock();
 }
 
-struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+static
+void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
+{
+       assert(nr_cpus_mask != -1);
+       if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
+               ht->cds_lfht_rcu_thread_online();
+               remove_table_partition(ht, i, 0, len);
+               ht->cds_lfht_rcu_thread_offline();
+               return;
+       }
+       partition_resize_helper(ht, i, len, remove_table_partition);
+}
+
+static
+void fini_table(struct cds_lfht *ht,
+               unsigned long first_order, unsigned long len_order)
+{
+       long i, end_order;
+
+       dbg_printf("fini table: first_order %lu end_order %lu\n",
+                  first_order, first_order + len_order);
+       end_order = first_order + len_order;
+       assert(first_order > 0);
+       for (i = end_order - 1; i >= first_order; i--) {
+               unsigned long len;
+
+               len = !i ? 1 : 1UL << (i - 1);
+               dbg_printf("fini order %lu len: %lu\n", i, len);
+
+               /* Stop shrink if the resize target changes under us */
+               if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
+                       break;
+
+               cmm_smp_wmb();  /* populate data before RCU size */
+               CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
+
+               /*
+                * We need to wait for all add operations to reach Q.S. (and
+                * thus use the new table for lookups) before we can start
+                * releasing the old dummy nodes. Otherwise their lookup will
+                * return a logically removed node as insert position.
+                */
+               ht->cds_lfht_synchronize_rcu();
+
+               /*
+                * Set "removed" flag in dummy nodes about to be removed.
+                * Unlink all now-logically-removed dummy node pointers.
+                * Concurrent add/remove operation are helping us doing
+                * the gc.
+                */
+               remove_table(ht, i, len);
+
+               ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level);
+
+               dbg_printf("fini new size: %lu\n", 1UL << i);
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
+       }
+}
+
+struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
                        cds_lfht_compare_fct compare_fct,
                        unsigned long hash_seed,
                        unsigned long init_size,
+                       int flags,
                        void (*cds_lfht_call_rcu)(struct rcu_head *head,
-                                       void (*func)(struct rcu_head *head)))
+                                       void (*func)(struct rcu_head *head)),
+                       void (*cds_lfht_synchronize_rcu)(void),
+                       void (*cds_lfht_rcu_read_lock)(void),
+                       void (*cds_lfht_rcu_read_unlock)(void),
+                       void (*cds_lfht_rcu_thread_offline)(void),
+                       void (*cds_lfht_rcu_thread_online)(void),
+                       void (*cds_lfht_rcu_register_thread)(void),
+                       void (*cds_lfht_rcu_unregister_thread)(void),
+                       pthread_attr_t *attr)
 {
        struct cds_lfht *ht;
        unsigned long order;
@@ -780,44 +1225,58 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
        if (init_size && (init_size & (init_size - 1)))
                return NULL;
        ht = calloc(1, sizeof(struct cds_lfht));
+       assert(ht);
        ht->hash_fct = hash_fct;
        ht->compare_fct = compare_fct;
        ht->hash_seed = hash_seed;
        ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
-       ht->in_progress_resize = 0;
+       ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
+       ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
+       ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
+       ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
+       ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
+       ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
+       ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
+       ht->resize_attr = attr;
        ht->percpu_count = alloc_per_cpu_items_count();
        /* this mutex should not nest in read-side C.S. */
        pthread_mutex_init(&ht->resize_mutex, NULL);
-       order = get_count_order_ulong(max(init_size, 1)) + 1;
-       ht->t = calloc(1, sizeof(struct cds_lfht)
-                      + (order * sizeof(struct _cds_lfht_node *)));
-       ht->t->size = 0;
+       order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
+       ht->flags = flags;
+       ht->cds_lfht_rcu_thread_offline();
        pthread_mutex_lock(&ht->resize_mutex);
-       init_table(ht, ht->t, 0, order);
+       ht->t.resize_target = 1UL << (order - 1);
+       init_table(ht, 0, order);
        pthread_mutex_unlock(&ht->resize_mutex);
+       ht->cds_lfht_rcu_thread_online();
        return ht;
 }
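
A construction sketch for the new entry point. The flavor callbacks passed here (call_rcu, synchronize_rcu, rcu_read_lock, ...) assume the default urcu flavor is linked in; the public header is expected to wrap this call so that users do not have to pass them by hand:

    #include <urcu.h>
    #include <urcu/rculfhash.h>

    static struct cds_lfht *make_table(cds_lfht_hash_fct hash,
                                       cds_lfht_compare_fct compare)
    {
            return _cds_lfht_new(hash, compare, 0x42UL /* hash seed */,
                                 1 /* init_size, must be a power of 2 */,
                                 CDS_LFHT_AUTO_RESIZE,
                                 call_rcu, synchronize_rcu,
                                 rcu_read_lock, rcu_read_unlock,
                                 rcu_thread_offline, rcu_thread_online,
                                 rcu_register_thread, rcu_unregister_thread,
                                 NULL /* default resize thread attributes */);
    }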
 
-struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
+void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
+               struct cds_lfht_iter *iter)
 {
-       struct rcu_table *t;
-       struct cds_lfht_node *node, *next;
+       struct cds_lfht_node *node, *next, *dummy_node;
        struct _cds_lfht_node *lookup;
-       unsigned long hash, reverse_hash, index, order;
+       unsigned long hash, reverse_hash, index, order, size;
 
        hash = ht->hash_fct(key, key_len, ht->hash_seed);
        reverse_hash = bit_reverse_ulong(hash);
 
-       t = rcu_dereference(ht->t);
-       index = hash & (t->size - 1);
+       size = rcu_dereference(ht->t.size);
+       index = hash & (size - 1);
        order = get_count_order_ulong(index + 1);
-       lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+       lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
        dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
-                  hash, index, order, index & ((1UL << (order - 1)) - 1));
-       node = (struct cds_lfht_node *) lookup;
+                  hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
+       dummy_node = (struct cds_lfht_node *) lookup;
+       /* We can always skip the dummy node initially */
+       node = rcu_dereference(dummy_node->p.next);
+       node = clear_flag(node);
        for (;;) {
-               if (unlikely(!node))
+               if (unlikely(is_end(node))) {
+                       node = NULL;
                        break;
+               }
                if (unlikely(node->p.reverse_hash > reverse_hash)) {
                        node = NULL;
                        break;
@@ -831,26 +1290,29 @@ struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key
                node = clear_flag(next);
        }
        assert(!node || !is_dummy(rcu_dereference(node->p.next)));
-       return node;
+       iter->node = node;
+       iter->next = next;
 }
 
-struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
-                               struct cds_lfht_node *node)
+void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
-       struct cds_lfht_node *next;
+       struct cds_lfht_node *node, *next;
        unsigned long reverse_hash;
        void *key;
        size_t key_len;
 
+       node = iter->node;
        reverse_hash = node->p.reverse_hash;
        key = node->key;
        key_len = node->key_len;
-       next = rcu_dereference(node->p.next);
+       next = iter->next;
        node = clear_flag(next);
 
        for (;;) {
-               if (unlikely(!node))
+               if (unlikely(is_end(node))) {
+                       node = NULL;
                        break;
+               }
                if (unlikely(node->p.reverse_hash > reverse_hash)) {
                        node = NULL;
                        break;
@@ -864,81 +1326,99 @@ struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
                node = clear_flag(next);
        }
        assert(!node || !is_dummy(rcu_dereference(node->p.next)));
-       return node;
+       iter->node = node;
+       iter->next = next;
 }
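
Lookups now report their result through an iterator, and cds_lfht_next advances it to the next node with the same key. A usage sketch, with a hypothetical mynode payload type; the iterator's node field is read directly, as the implementation above does:

    #include <stdio.h>
    #include <urcu.h>
    #include <urcu/rculfhash.h>

    struct mynode {
            int value;                      /* hypothetical payload */
            struct cds_lfht_node node;      /* chaining in the hash table */
    };

    static void visit_matches(struct cds_lfht *ht, void *key, size_t key_len)
    {
            struct cds_lfht_iter iter;

            rcu_read_lock();
            cds_lfht_lookup(ht, key, key_len, &iter);
            while (iter.node) {     /* NULL once no node matches the key */
                    struct mynode *m =
                            caa_container_of(iter.node, struct mynode, node);

                    printf("value %d\n", m->value);
                    cds_lfht_next(ht, &iter);       /* next duplicate, if any */
            }
            rcu_read_unlock();
    }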
 
 void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
 {
-       struct rcu_table *t;
-       unsigned long hash;
+       unsigned long hash, size;
 
        hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
        node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
-       t = rcu_dereference(ht->t);
-       (void) _cds_lfht_add(ht, t, node, 0, 0);
-       ht_count_add(ht, t);
+       size = rcu_dereference(ht->t.size);
+       (void) _cds_lfht_add(ht, size, node, ADD_DEFAULT, 0);
+       ht_count_add(ht, size);
 }
 
 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
-                                       struct cds_lfht_node *node)
+                               struct cds_lfht_node *node)
 {
-       struct rcu_table *t;
-       unsigned long hash;
+       unsigned long hash, size;
        struct cds_lfht_node *ret;
 
        hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
        node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
-       t = rcu_dereference(ht->t);
-       ret = _cds_lfht_add(ht, t, node, 1, 0);
-       if (ret != node)
-               ht_count_add(ht, t);
+       size = rcu_dereference(ht->t.size);
+       ret = _cds_lfht_add(ht, size, node, ADD_UNIQUE, 0);
+       if (ret == node)
+               ht_count_add(ht, size);
        return ret;
 }
 
-int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
+struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht,
+                               struct cds_lfht_node *node)
 {
-       struct rcu_table *t;
+       unsigned long hash, size;
+       struct cds_lfht_node *ret;
+
+       hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+       node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+
+       size = rcu_dereference(ht->t.size);
+       ret = _cds_lfht_add(ht, size, node, ADD_REPLACE, 0);
+       if (ret == NULL)
+               ht_count_add(ht, size);
+       return ret;
+}
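
A usage sketch for the new replace semantics: a NULL return means the key was absent and the node was simply added, while a non-NULL return is the displaced node, which may be freed after a grace period. The mynode type and upsert helper are hypothetical:

    #include <stdlib.h>
    #include <urcu.h>
    #include <urcu/rculfhash.h>

    struct mynode {
            int value;
            struct cds_lfht_node node;
            struct rcu_head head;   /* for call_rcu deferred free */
    };

    static void free_mynode(struct rcu_head *head)
    {
            free(caa_container_of(head, struct mynode, head));
    }

    static void upsert(struct cds_lfht *ht, struct mynode *new_node)
    {
            struct cds_lfht_node *old;

            rcu_read_lock();
            old = cds_lfht_replace(ht, &new_node->node);
            rcu_read_unlock();
            if (old)        /* non-NULL: the node we displaced */
                    call_rcu(&caa_container_of(old, struct mynode, node)->head,
                             free_mynode);
    }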
+
+int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
+{
+       unsigned long size;
        int ret;
 
-       t = rcu_dereference(ht->t);
-       ret = _cds_lfht_remove(ht, t, node);
+       size = rcu_dereference(ht->t.size);
+       ret = _cds_lfht_del(ht, size, node, 0);
        if (!ret)
-               ht_count_remove(ht, t);
+               ht_count_del(ht, size);
        return ret;
 }
 
 static
 int cds_lfht_delete_dummy(struct cds_lfht *ht)
 {
-       struct rcu_table *t;
        struct cds_lfht_node *node;
        struct _cds_lfht_node *lookup;
-       unsigned long order, i;
+       unsigned long order, i, size;
 
-       t = ht->t;
        /* Check that the table is empty */
-       lookup = &t->tbl[0][0];
+       lookup = &ht->t.tbl[0]->nodes[0];
        node = (struct cds_lfht_node *) lookup;
        do {
                node = clear_flag(node)->p.next;
                if (!is_dummy(node))
                        return -EPERM;
                assert(!is_removed(node));
-       } while (clear_flag(node));
+       } while (!is_end(node));
+       /*
+        * size accessed without rcu_dereference because hash table is
+        * being destroyed.
+        */
+       size = ht->t.size;
        /* Internal sanity check: all nodes left should be dummy */
-       for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
+       for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
                unsigned long len;
 
                len = !order ? 1 : 1UL << (order - 1);
                for (i = 0; i < len; i++) {
                        dbg_printf("delete order %lu i %lu hash %lu\n",
                                order, i,
-                               bit_reverse_ulong(t->tbl[order][i].reverse_hash));
-                       assert(is_dummy(t->tbl[order][i].next));
+                               bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
+                       assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
                }
-               free(t->tbl[order]);
+               poison_free(ht->t.tbl[order]);
        }
        return 0;
 }
@@ -947,7 +1427,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
  * Should only be called when no more concurrent readers nor writers can
  * possibly access the table.
  */
-int cds_lfht_destroy(struct cds_lfht *ht)
+int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 {
        int ret;
 
@@ -958,9 +1438,10 @@ int cds_lfht_destroy(struct cds_lfht *ht)
        ret = cds_lfht_delete_dummy(ht);
        if (ret)
                return ret;
-       free(ht->t);
        free_per_cpu_items_count(ht->percpu_count);
-       free(ht);
+       if (attr)
+               *attr = ht->resize_attr;
+       poison_free(ht);
        return ret;
 }
 
@@ -968,7 +1449,6 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
                unsigned long *count,
                unsigned long *removed)
 {
-       struct rcu_table *t;
        struct cds_lfht_node *node, *next;
        struct _cds_lfht_node *lookup;
        unsigned long nr_dummy = 0;
@@ -976,9 +1456,8 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
        *count = 0;
        *removed = 0;
 
-       t = rcu_dereference(ht->t);
        /* Count non-dummy nodes in the table */
-       lookup = &t->tbl[0][0];
+       lookup = &ht->t.tbl[0]->nodes[0];
        node = (struct cds_lfht_node *) lookup;
        do {
                next = rcu_dereference(node->p.next);
@@ -990,81 +1469,92 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
                else
                        (nr_dummy)++;
                node = clear_flag(next);
-       } while (node);
+       } while (!is_end(node));
        dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
 }
 
+/* called with resize mutex held */
 static
-void cds_lfht_free_table_cb(struct rcu_head *head)
+void _do_cds_lfht_grow(struct cds_lfht *ht,
+               unsigned long old_size, unsigned long new_size)
 {
-       struct rcu_table *t =
-               caa_container_of(head, struct rcu_table, head);
-       free(t);
+       unsigned long old_order, new_order;
+
+       old_order = get_count_order_ulong(old_size) + 1;
+       new_order = get_count_order_ulong(new_size) + 1;
+       printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+              old_size, old_order, new_size, new_order);
+       assert(new_size > old_size);
+       init_table(ht, old_order, new_order - old_order);
 }
 
 /* called with resize mutex held */
 static
-void _do_cds_lfht_resize(struct cds_lfht *ht)
+void _do_cds_lfht_shrink(struct cds_lfht *ht,
+               unsigned long old_size, unsigned long new_size)
 {
-       unsigned long new_size, old_size, old_order, new_order;
-       struct rcu_table *new_t, *old_t;
+       unsigned long old_order, new_order;
 
-       old_t = ht->t;
-       old_size = old_t->size;
+       new_size = max(new_size, MIN_TABLE_SIZE);
        old_order = get_count_order_ulong(old_size) + 1;
-
-       new_size = CMM_LOAD_SHARED(old_t->resize_target);
-       if (old_size == new_size)
-               return;
        new_order = get_count_order_ulong(new_size) + 1;
        printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
               old_size, old_order, new_size, new_order);
-       new_t = malloc(sizeof(struct cds_lfht)
-                       + (new_order * sizeof(struct _cds_lfht_node *)));
-       assert(new_size > old_size);
-       memcpy(&new_t->tbl, &old_t->tbl,
-              old_order * sizeof(struct _cds_lfht_node *));
-       init_table(ht, new_t, old_order, new_order - old_order);
-       /* Changing table and size atomically wrt lookups */
-       rcu_assign_pointer(ht->t, new_t);
-       ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
+       assert(new_size < old_size);
+
+       /* Remove and unlink all dummy nodes to remove. */
+       fini_table(ht, new_order, old_order - new_order);
+}
+
+/* called with resize mutex held */
+static
+void _do_cds_lfht_resize(struct cds_lfht *ht)
+{
+       unsigned long new_size, old_size;
+
+       /*
+        * Resize table, re-do if the target size has changed under us.
+        */
+       do {
+               ht->t.resize_initiated = 1;
+               old_size = ht->t.size;
+               new_size = CMM_LOAD_SHARED(ht->t.resize_target);
+               if (old_size < new_size)
+                       _do_cds_lfht_grow(ht, old_size, new_size);
+               else if (old_size > new_size)
+                       _do_cds_lfht_shrink(ht, old_size, new_size);
+               ht->t.resize_initiated = 0;
+               /* write resize_initiated before read resize_target */
+               cmm_smp_mb();
+       } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
 }
 
 static
-unsigned long resize_target_update(struct rcu_table *t,
+unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
                                   int growth_order)
 {
-       return _uatomic_max(&t->resize_target,
-                           t->size << growth_order);
+       return _uatomic_max(&ht->t.resize_target,
+                           size << growth_order);
 }
 
 static
-unsigned long resize_target_update_count(struct rcu_table *t,
-                                  unsigned long count)
+void resize_target_update_count(struct cds_lfht *ht,
+                               unsigned long count)
 {
-       return _uatomic_max(&t->resize_target, count);
+       count = max(count, MIN_TABLE_SIZE);
+       uatomic_set(&ht->t.resize_target, count);
 }
 
-void cds_lfht_resize(struct cds_lfht *ht, int growth)
+void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
 {
-       struct rcu_table *t = rcu_dereference(ht->t);
-       unsigned long target_size;
-
-       if (growth < 0) {
-               /*
-                * Silently refuse to shrink hash table. (not supported)
-                */
-               dbg_printf("shrinking hash table not supported.\n");
-               return;
-       }
-
-       target_size = resize_target_update(t, growth);
-       if (t->size < target_size) {
-               CMM_STORE_SHARED(t->resize_initiated, 1);
-               pthread_mutex_lock(&ht->resize_mutex);
-               _do_cds_lfht_resize(ht);
-               pthread_mutex_unlock(&ht->resize_mutex);
-       }
+       resize_target_update_count(ht, new_size);
+       CMM_STORE_SHARED(ht->t.resize_initiated, 1);
+       ht->cds_lfht_rcu_thread_offline();
+       pthread_mutex_lock(&ht->resize_mutex);
+       _do_cds_lfht_resize(ht);
+       pthread_mutex_unlock(&ht->resize_mutex);
+       ht->cds_lfht_rcu_thread_online();
 }
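
Note the interface change: cds_lfht_resize now takes a target size rather than a growth order, so for example cds_lfht_resize(ht, 4096) grows or shrinks the table to 4096 buckets (clamped below at MIN_TABLE_SIZE) and returns only once the resize has completed.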
 
 static
@@ -1074,45 +1564,56 @@ void do_resize_cb(struct rcu_head *head)
                caa_container_of(head, struct rcu_resize_work, head);
        struct cds_lfht *ht = work->ht;
 
+       ht->cds_lfht_rcu_thread_offline();
        pthread_mutex_lock(&ht->resize_mutex);
        _do_cds_lfht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
-       free(work);
+       ht->cds_lfht_rcu_thread_online();
+       poison_free(work);
        cmm_smp_mb();   /* finish resize before decrement */
        uatomic_dec(&ht->in_progress_resize);
 }
 
 static
-void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
+void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
 {
        struct rcu_resize_work *work;
        unsigned long target_size;
 
-       target_size = resize_target_update(t, growth);
-       if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+       target_size = resize_target_update(ht, size, growth);
+       /* Store resize_target before read resize_initiated */
+       cmm_smp_mb();
+       if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
                uatomic_inc(&ht->in_progress_resize);
                cmm_smp_mb();   /* increment resize count before calling it */
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
-               CMM_STORE_SHARED(t->resize_initiated, 1);
+               CMM_STORE_SHARED(ht->t.resize_initiated, 1);
        }
 }
 
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
 static
-void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
                                unsigned long count)
 {
        struct rcu_resize_work *work;
-       unsigned long target_size;
 
-       target_size = resize_target_update_count(t, count);
-       if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+       if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+               return;
+       resize_target_update_count(ht, count);
+       /* Store resize_target before read resize_initiated */
+       cmm_smp_mb();
+       if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
                uatomic_inc(&ht->in_progress_resize);
                cmm_smp_mb();   /* increment resize count before calling it */
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
-               CMM_STORE_SHARED(t->resize_initiated, 1);
+               CMM_STORE_SHARED(ht->t.resize_initiated, 1);
        }
 }
+
+#endif