rculfhash: add list iteration tracer in gc
diff --git a/rculfhash.c b/rculfhash.c
index 1720afb61669e971968fc2cfcd994db3e6b7b8be..6a80049ddd1f783a21e117d63437eec5771ba514 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -1,7 +1,7 @@
 /*
  * rculfhash.c
  *
- * Userspace RCU library - Lock-Free Expandable RCU Hash Table
+ * Userspace RCU library - Lock-Free Resizable RCU Hash Table
  *
  * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
@@ -29,7 +29,7 @@
  *   symposium on Parallel algorithms and architectures, ACM Press,
  *   (2002), 73-82.
  *
- * Some specificities of this Lock-Free Expandable RCU Hash Table
+ * Some specificities of this Lock-Free Resizable RCU Hash Table
  * implementation:
  *
  * - RCU read-side critical section allows readers to perform hash
  * - An index of dummy nodes is kept. These dummy nodes are the hash
  *   table "buckets", and they are also chained together in the
  *   split-ordered list, which allows recursive expansion.
- * - The resize operation only allows expanding the hash table.
- *   It is triggered either through an API call or automatically by
- *   detecting long chains in the add operation.
+ * - The resize operation for small tables only allows expanding the hash table.
+ *   It is triggered automatically by detecting long chains in the add
+ *   operation.
+ * - The resize operation for larger tables (also available through the
+ *   API) allows both expanding and shrinking the hash table.
+ * - Per-CPU split-counters are used to keep track of the number of
+ *   nodes within the hash table for automatic resize triggering.
  * - Resize operation initiated by long chain detection is executed by a
  *   call_rcu thread, which keeps lock-freedom of add and remove.
  * - Resize operations are protected by a mutex.
  * - The per-order dummy node tables contain a compact version of the
  *   hash table nodes. These tables are invariant after they are
  *   populated into the hash table.
+ *
+ * A bit of ASCII art explanation:
+ *
+ * The order index is off-by-one compared to the actual power of 2, because
+ * we use index 0 to deal with the 0 special case.
+ *
+ * This shows the nodes for a small table ordered by reversed bits:
+ * 
+ *    bits   reverse
+ * 0  000        000
+ * 4  100        001
+ * 2  010        010
+ * 6  110        011
+ * 1  001        100
+ * 5  101        101
+ * 3  011        110
+ * 7  111        111
+ * 
+ * This shows the nodes in order of non-reversed bits, linked by 
+ * reversed-bit order.
+ * 
+ * order              bits       reverse
+ * 0               0  000        000
+ *                 |
+ * 1               |  1  001        100       <-    <-
+ *                 |  |                        |     |
+ * 2               |  |  2  010        010     |     |
+ *                 |  |  |  3  011        110  | <-  |
+ *                 |  |  |  |                  |  |  |
+ * 3               -> |  |  |  4  100        001  |  |
+ *                    -> |  |     5  101        101  |
+ *                       -> |        6  110        011
+ *                          ->          7  111        111
  */
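As an aside for readers new to split-ordered lists: the ordering in the diagram above comes from reversing the significant bits of each bucket index. The standalone sketch below (illustrative only, not part of this patch) reproduces the bits/reverse columns for an 8-bucket table.

    #include <stdio.h>

    /* Reverse the lowest 'bits' bits of v (simple illustrative version). */
    static unsigned long reverse_bits(unsigned long v, int bits)
    {
        unsigned long r = 0;
        int i;

        for (i = 0; i < bits; i++)
            r |= ((v >> i) & 1UL) << (bits - 1 - i);
        return r;
    }

    int main(void)
    {
        unsigned long i;

        /* Print bucket index, its bits, and its reversed bits (8-bucket table). */
        for (i = 0; i < 8; i++)
            printf("%lu  %lu%lu%lu  ->  %lu%lu%lu\n", i,
                   (i >> 2) & 1UL, (i >> 1) & 1UL, i & 1UL,
                   (reverse_bits(i, 3) >> 2) & 1UL,
                   (reverse_bits(i, 3) >> 1) & 1UL,
                   reverse_bits(i, 3) & 1UL);
        return 0;
    }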
 
 #define _LGPL_SOURCE
 #include <stdint.h>
 #include <string.h>
 
+#include "config.h"
 #include <urcu.h>
 #include <urcu-call-rcu.h>
 #include <urcu/arch.h>
 #define dbg_printf(fmt, args...)
 #endif
 
-#define CHAIN_LEN_TARGET               4
-#define CHAIN_LEN_RESIZE_THRESHOLD     8
+/* For testing */
+#define POISON_FREE
+
+/*
+ * Per-CPU split-counters lazily update the global counter every 1024
+ * additions/removals. They automatically keep track of when a resize is
+ * required. We use the bucket length as an indicator of the need to
+ * expand for small tables and for machines lacking per-cpu data support.
+ */
+#define COUNT_COMMIT_ORDER             10
+#define CHAIN_LEN_TARGET               1
+#define CHAIN_LEN_RESIZE_THRESHOLD     3
+
+/*
+ * Define the minimum table size. Protects against hash table resize overload
+ * when too many entries are added quickly before the resize can complete.
+ * This is especially the case if the table could be shrunk to a size of 1.
+ * TODO: we might want to make the add/remove operations help the resize to
+ * add or remove dummy nodes when a resize is ongoing, to ensure an upper
+ * bound on chain length.
+ */
+#define MIN_TABLE_SIZE                 128
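The split-counter scheme above can be pictured with a tiny single-threaded sketch (illustrative only; the real code uses one counter per CPU and uatomic operations): each local counter only folds its contribution into the shared approximate count once every 2^COUNT_COMMIT_ORDER operations.

    #include <stdio.h>

    #define COUNT_COMMIT_ORDER 10   /* commit to the global count every 1024 ops */

    struct split_counter {
        unsigned long local;        /* per-CPU (here: single-threaded) count */
        unsigned long *global;      /* shared approximate count */
    };

    /* Record one addition; touch the global counter only every 2^COUNT_COMMIT_ORDER adds. */
    static void counter_add(struct split_counter *c)
    {
        c->local++;
        if (!(c->local & ((1UL << COUNT_COMMIT_ORDER) - 1)))
            *c->global += 1UL << COUNT_COMMIT_ORDER;
    }

    int main(void)
    {
        unsigned long global = 0;
        struct split_counter c = { 0, &global };
        unsigned long i;

        for (i = 0; i < 3000; i++)
            counter_add(&c);
        /* The global count lags the real count by less than 2^COUNT_COMMIT_ORDER. */
        printf("real: %lu, global approximation: %lu\n", i, global);
        return 0;
    }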
 
 #ifndef max
 #define max(a, b)      ((a) > (b) ? (a) : (b))
 #define DUMMY_FLAG             (1UL << 1)
 #define FLAGS_MASK             ((1UL << 2) - 1)
 
+struct ht_items_count {
+       unsigned long add, remove;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+struct rcu_level {
+       struct rcu_head head;
+       struct _cds_lfht_node nodes[0];
+};
+
 struct rcu_table {
        unsigned long size;     /* always a power of 2 */
        unsigned long resize_target;
        int resize_initiated;
        struct rcu_head head;
-       struct _cds_lfht_node *tbl[0];
+       struct rcu_level *tbl[0];
 };
 
 struct cds_lfht {
@@ -147,10 +214,14 @@ struct cds_lfht {
        cds_lfht_hash_fct hash_fct;
        cds_lfht_compare_fct compare_fct;
        unsigned long hash_seed;
+       int flags;
        pthread_mutex_t resize_mutex;   /* resize mutex: add/del mutex */
        unsigned int in_progress_resize, in_progress_destroy;
        void (*cds_lfht_call_rcu)(struct rcu_head *head,
                      void (*func)(struct rcu_head *head));
+       void (*cds_lfht_synchronize_rcu)(void);
+       unsigned long count;            /* global approximate item count */
+       struct ht_items_count *percpu_count;    /* per-cpu item count */
 };
 
 struct rcu_resize_work {
@@ -350,13 +421,185 @@ int get_count_order_ulong(unsigned long x)
        return order;
 }
 
+#ifdef POISON_FREE
+#define poison_free(ptr)                               \
+       do {                                            \
+               memset(ptr, 0x42, sizeof(*(ptr)));      \
+               free(ptr);                              \
+       } while (0)
+#else
+#define poison_free(ptr)       free(ptr)
+#endif
+
 static
 void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
 
+/*
+ * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
+ * available, then we support hash table item accounting.
+ * In the unfortunate event that the number of CPUs reported is
+ * inaccurate, we use modulo arithmetic on the number of CPUs we got.
+ */
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+                               unsigned long count);
+
+static long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+       struct ht_items_count *count;
+
+       switch (nr_cpus_mask) {
+       case -2:
+               return NULL;
+       case -1:
+       {
+               long maxcpus;
+
+               maxcpus = sysconf(_SC_NPROCESSORS_CONF);
+               if (maxcpus <= 0) {
+                       nr_cpus_mask = -2;
+                       return NULL;
+               }
+               /*
+                * round up number of CPUs to next power of two, so we
+                * can use & for modulo.
+                */
+               maxcpus = 1UL << get_count_order_ulong(maxcpus);
+               nr_cpus_mask = maxcpus - 1;
+       }
+               /* Fall-through */
+       default:
+               return calloc(nr_cpus_mask + 1, sizeof(*count));
+       }
+}
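The power-of-two rounding above exists so that ht_get_cpu() can reduce the CPU number with a mask instead of a modulo; when the counter array length is a power of two, cpu & (len - 1) equals cpu % len. A small check of that equivalence (illustrative only, using a hypothetical next_pow2() helper):

    #include <assert.h>
    #include <stdio.h>

    /* Round v up to the next power of two (illustrative, v > 0). */
    static unsigned long next_pow2(unsigned long v)
    {
        unsigned long p = 1;

        while (p < v)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned long nr_cpus = 6;                  /* e.g. reported by sysconf() */
        unsigned long len = next_pow2(nr_cpus);     /* 8 */
        unsigned long mask = len - 1;               /* 7 */
        unsigned long cpu;

        for (cpu = 0; cpu < 16; cpu++)
            assert((cpu & mask) == (cpu % len));    /* mask == cheap modulo */
        printf("array length %lu, mask 0x%lx\n", len, mask);
        return 0;
    }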
+
+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+       poison_free(count);
+}
+
+static
+int ht_get_cpu(void)
+{
+       int cpu;
+
+       assert(nr_cpus_mask >= 0);
+       cpu = sched_getcpu();
+       if (unlikely(cpu < 0))
+               return cpu;
+       else
+               return cpu & nr_cpus_mask;
+}
+
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+       unsigned long percpu_count;
+       int cpu;
+
+       if (unlikely(!ht->percpu_count))
+               return;
+       cpu = ht_get_cpu();
+       if (unlikely(cpu < 0))
+               return;
+       percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
+       if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+               unsigned long count;
+
+               dbg_printf("add percpu %lu\n", percpu_count);
+               count = uatomic_add_return(&ht->count,
+                                          1UL << COUNT_COMMIT_ORDER);
+               /* If power of 2 */
+               if (!(count & (count - 1))) {
+                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
+                                       < t->size)
+                               return;
+                       dbg_printf("add set global %lu\n", count);
+                       cds_lfht_resize_lazy_count(ht, t,
+                               count >> (CHAIN_LEN_TARGET - 1));
+               }
+       }
+}
+
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+       unsigned long percpu_count;
+       int cpu;
+
+       if (unlikely(!ht->percpu_count))
+               return;
+       cpu = ht_get_cpu();
+       if (unlikely(cpu < 0))
+               return;
+       percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
+       if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+               unsigned long count;
+
+               dbg_printf("remove percpu %lu\n", percpu_count);
+               count = uatomic_add_return(&ht->count,
+                                          -(1UL << COUNT_COMMIT_ORDER));
+               /* If power of 2 */
+               if (!(count & (count - 1))) {
+                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
+                                       >= t->size)
+                               return;
+                       dbg_printf("remove set global %lu\n", count);
+                       cds_lfht_resize_lazy_count(ht, t,
+                               count >> (CHAIN_LEN_TARGET - 1));
+               }
+       }
+}
+
+#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
+static const long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+       return NULL;
+}
+
+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+}
+
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
+
 static
 void check_resize(struct cds_lfht *ht, struct rcu_table *t,
                  uint32_t chain_len)
 {
+       unsigned long count;
+
+       if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+               return;
+       count = uatomic_read(&ht->count);
+       /*
+        * Use the bucket-local chain length to expand small tables and
+        * to handle environments lacking per-cpu data support.
+        */
+       if (count >= (1UL << COUNT_COMMIT_ORDER))
+               return;
        if (chain_len > 100)
                dbg_printf("WARNING: large chain length: %u.\n",
                           chain_len);
@@ -409,6 +652,22 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
        return v;
 }
 
+static
+void cds_lfht_free_table_cb(struct rcu_head *head)
+{
+       struct rcu_table *t =
+               caa_container_of(head, struct rcu_table, head);
+       poison_free(t);
+}
+
+static
+void cds_lfht_free_level(struct rcu_head *head)
+{
+       struct rcu_level *l =
+               caa_container_of(head, struct rcu_level, head);
+       poison_free(l);
+}
+
 /*
  * Remove all logically deleted nodes from a bucket up to a certain node key.
  */
@@ -416,12 +675,28 @@ static
 void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
 {
        struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
-
+       struct cds_lfht_node *iter_trace[64];
+       unsigned long trace_idx = 0;
+
+       memset(iter_trace, 0, sizeof(iter_trace));
+       assert(!is_dummy(dummy));
+       assert(!is_removed(dummy));
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
        for (;;) {
+               iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x1;
                iter_prev = dummy;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
+               iter_trace[trace_idx++ & (64 - 1)] = iter;
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+               /*
+                * We should never be called with the dummy node (start of
+                * chain) and the logically removed node (end of path
+                * compression marker) being the same node. This would be a
+                * bug in the algorithm implementation.
+                */
+               assert(dummy != node);
                for (;;) {
                        if (unlikely(!clear_flag(iter)))
                                return;
@@ -432,6 +707,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
                                break;
                        iter_prev = clear_flag(iter);
                        iter = next;
+                       iter_trace[trace_idx++ & (64 - 1)] = iter;
                }
                assert(!is_removed(iter));
                if (is_dummy(iter))
@@ -439,6 +715,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
                else
                        new_next = clear_flag(next);
                (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+               iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x2;
        }
 }
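The iter_trace array added above (the point of this commit) is a fixed-size ring buffer: trace_idx grows without bound and 'trace_idx & (64 - 1)' wraps it into 64 slots, so the array always holds the 64 most recent steps of the gc iteration, including the 0x1 pass-start and 0x2 post-cmpxchg markers, which can then be inspected from a debugger or core dump. A generic sketch of the same pattern, outside the hash table code:

    #include <stdio.h>

    #define TRACE_SLOTS 64  /* must be a power of two for the & trick */

    struct trace_ring {
        const void *slot[TRACE_SLOTS];
        unsigned long idx;  /* monotonically increasing write position */
    };

    /* Record one value; old entries are overwritten once the ring wraps. */
    static void trace_record(struct trace_ring *t, const void *v)
    {
        t->slot[t->idx++ & (TRACE_SLOTS - 1)] = v;
    }

    int main(void)
    {
        static struct trace_ring ring;
        int values[100];
        int i;

        for (i = 0; i < 100; i++)
            trace_record(&ring, &values[i]);
        /* Only the 64 most recent pointers remain (values[36..99], in ring order). */
        printf("wrote %lu entries, ring keeps the last %d\n", ring.idx, TRACE_SLOTS);
        return 0;
    }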
 
@@ -451,6 +728,8 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
        struct _cds_lfht_node *lookup;
        unsigned long hash, index, order;
 
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
        if (!t->size) {
                assert(dummy);
                node->p.next = flag_dummy(NULL);
@@ -466,14 +745,16 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
                 */
                index = hash & (t->size - 1);
                order = get_count_order_ulong(index + 1);
-               lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+               lookup = &t->tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
                iter_prev = (struct cds_lfht_node *) lookup;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
                for (;;) {
+                       /* TODO: check if removed */
                        if (unlikely(!clear_flag(iter)))
                                goto insert;
+                       /* TODO: check if removed */
                        if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
                                goto insert;
                        next = rcu_dereference(clear_flag(iter)->p.next);
@@ -495,6 +776,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
        insert:
                assert(node != clear_flag(iter));
                assert(!is_removed(iter_prev));
+               assert(!is_removed(iter));
                assert(iter_prev != node);
                if (!dummy)
                        node->p.next = clear_flag(iter);
@@ -522,7 +804,7 @@ gc_end:
        /* Garbage collect logically removed nodes in the bucket */
        index = hash & (t->size - 1);
        order = get_count_order_ulong(index + 1);
-       lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+       lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
        dummy_node = (struct cds_lfht_node *) lookup;
        _cds_lfht_gc_bucket(dummy_node, node);
        return node;
@@ -530,7 +812,7 @@ gc_end:
 
 static
 int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
-               struct cds_lfht_node *node)
+               struct cds_lfht_node *node, int dummy_removal)
 {
        struct cds_lfht_node *dummy, *next, *old;
        struct _cds_lfht_node *lookup;
@@ -538,12 +820,17 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
        unsigned long hash, index, order;
 
        /* logically delete the node */
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
        old = rcu_dereference(node->p.next);
        do {
                next = old;
                if (unlikely(is_removed(next)))
                        goto end;
-               assert(!is_dummy(next));
+               if (dummy_removal)
+                       assert(is_dummy(next));
+               else
+                       assert(!is_dummy(next));
                old = uatomic_cmpxchg(&node->p.next, next,
                                      flag_removed(next));
        } while (old != next);
@@ -557,9 +844,10 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
         * if found.
         */
        hash = bit_reverse_ulong(node->p.reverse_hash);
+       assert(t->size > 0);
        index = hash & (t->size - 1);
        order = get_count_order_ulong(index + 1);
-       lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+       lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
        dummy = (struct cds_lfht_node *) lookup;
        _cds_lfht_gc_bucket(dummy, node);
 end:
@@ -589,12 +877,14 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t,
 
                len = !i ? 1 : 1UL << (i - 1);
                dbg_printf("init order %lu len: %lu\n", i, len);
-               t->tbl[i] = calloc(len, sizeof(struct _cds_lfht_node));
+               t->tbl[i] = calloc(1, sizeof(struct rcu_level)
+                               + (len * sizeof(struct _cds_lfht_node)));
                for (j = 0; j < len; j++) {
+                       struct cds_lfht_node *new_node =
+                               (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
+
                        dbg_printf("init entry: i %lu j %lu hash %lu\n",
                                   i, j, !i ? 0 : (1UL << (i - 1)) + j);
-                       struct cds_lfht_node *new_node =
-                               (struct cds_lfht_node *) &t->tbl[i][j];
                        new_node->p.reverse_hash =
                                bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
                        (void) _cds_lfht_add(ht, t, new_node, 0, 1);
@@ -611,31 +901,80 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t,
        t->resize_initiated = 0;
 }
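The per-order layout used by init_table() and the lookup/add/remove paths is worth a worked example: bucket 'index' lives in the table of order get_count_order_ulong(index + 1), at an offset equal to index minus the number of buckets held by all lower orders. The standalone sketch below (count_order() is a simplified stand-in for get_count_order_ulong()) prints that mapping for a 16-bucket table.

    #include <stdio.h>

    /* ceil(log2(x)) for x >= 1: the order of the per-order table holding bucket x - 1. */
    static unsigned long count_order(unsigned long x)
    {
        unsigned long order = 0;

        while ((1UL << order) < x)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long index;

        /* Map each bucket of a 16-bucket table to (order, offset within that order). */
        for (index = 0; index < 16; index++) {
            unsigned long order = count_order(index + 1);
            unsigned long offset = index & (!order ? 0 : ((1UL << (order - 1)) - 1));

            printf("bucket %2lu -> order %lu, offset %2lu\n", index, order, offset);
        }
        return 0;
    }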
 
+static
+void fini_table(struct cds_lfht *ht, struct rcu_table *t,
+               unsigned long first_order, unsigned long len_order)
+{
+       long i, end_order;
+
+       dbg_printf("fini table: first_order %lu end_order %lu\n",
+                  first_order, first_order + len_order);
+       end_order = first_order + len_order;
+       assert(first_order > 0);
+       assert(t->size == (1UL << (end_order - 1)));
+       for (i = end_order - 1; i >= first_order; i--) {
+               unsigned long j, len;
+
+               len = !i ? 1 : 1UL << (i - 1);
+               dbg_printf("fini order %lu len: %lu\n", i, len);
+               /*
+                * Update table size. Need to shrink this table prior to
+                * removal so gc lookups use non-logically-removed dummy
+                * nodes.
+                */
+               t->size = 1UL << (i - 1);
+               /* Unlink */
+               for (j = 0; j < len; j++) {
+                       struct cds_lfht_node *fini_node =
+                               (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
+
+                       dbg_printf("fini entry: i %lu j %lu hash %lu\n",
+                                  i, j, !i ? 0 : (1UL << (i - 1)) + j);
+                       fini_node->p.reverse_hash =
+                               bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+                       (void) _cds_lfht_remove(ht, t, fini_node, 1);
+                       if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                               break;
+               }
+               ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
+               dbg_printf("fini new size: %lu\n", t->size);
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
+       }
+       t->resize_target = t->size;
+       t->resize_initiated = 0;
+}
+
 struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
                        cds_lfht_compare_fct compare_fct,
                        unsigned long hash_seed,
                        unsigned long init_size,
+                       int flags,
                        void (*cds_lfht_call_rcu)(struct rcu_head *head,
-                                       void (*func)(struct rcu_head *head)))
+                                       void (*func)(struct rcu_head *head)),
+                       void (*cds_lfht_synchronize_rcu)(void))
 {
        struct cds_lfht *ht;
        unsigned long order;
 
        /* init_size must be power of two */
-       if (init_size & (init_size - 1))
+       if (init_size && (init_size & (init_size - 1)))
                return NULL;
        ht = calloc(1, sizeof(struct cds_lfht));
        ht->hash_fct = hash_fct;
        ht->compare_fct = compare_fct;
        ht->hash_seed = hash_seed;
        ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
+       ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
        ht->in_progress_resize = 0;
+       ht->percpu_count = alloc_per_cpu_items_count();
        /* this mutex should not nest in read-side C.S. */
        pthread_mutex_init(&ht->resize_mutex, NULL);
-       order = get_count_order_ulong(max(init_size, 1)) + 1;
+       order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
        ht->t = calloc(1, sizeof(struct cds_lfht)
-                      + (order * sizeof(struct _cds_lfht_node *)));
+                      + (order * sizeof(struct rcu_level *)));
        ht->t->size = 0;
+       ht->flags = flags;
        pthread_mutex_lock(&ht->resize_mutex);
        init_table(ht, ht->t, 0, order);
        pthread_mutex_unlock(&ht->resize_mutex);
@@ -655,9 +994,9 @@ struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key
        t = rcu_dereference(ht->t);
        index = hash & (t->size - 1);
        order = get_count_order_ulong(index + 1);
-       lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+       lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
        dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
-                  hash, index, order, index & ((1UL << (order - 1)) - 1));
+                  hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
        node = (struct cds_lfht_node *) lookup;
        for (;;) {
                if (unlikely(!node))
@@ -678,6 +1017,39 @@ struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key
        return node;
 }
 
+struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
+                               struct cds_lfht_node *node)
+{
+       struct cds_lfht_node *next;
+       unsigned long reverse_hash;
+       void *key;
+       size_t key_len;
+
+       reverse_hash = node->p.reverse_hash;
+       key = node->key;
+       key_len = node->key_len;
+       next = rcu_dereference(node->p.next);
+       node = clear_flag(next);
+
+       for (;;) {
+               if (unlikely(!node))
+                       break;
+               if (unlikely(node->p.reverse_hash > reverse_hash)) {
+                       node = NULL;
+                       break;
+               }
+               next = rcu_dereference(node->p.next);
+               if (likely(!is_removed(next))
+                   && !is_dummy(next)
+                   && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+                               break;
+               }
+               node = clear_flag(next);
+       }
+       assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+       return node;
+}
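cds_lfht_next() walks the chain past a node returned by cds_lfht_lookup(), yielding further nodes with the same key while skipping dummy and logically removed nodes. A hedged usage sketch follows, assuming a caller-defined struct mystruct that embeds the node; the urcu and rculfhash headers (and urcu/compiler.h for caa_container_of) are assumed to be included.

    /*
     * Illustrative only: visit every node matching "lookup_key" inside an RCU
     * read-side critical section. "struct mystruct", "lookup_key" and "key_len"
     * are assumptions made for this example, not part of the patch.
     */
    struct mystruct {
        struct cds_lfht_node node;
        /* ... user data ... */
    };

    void visit_all_duplicates(struct cds_lfht *ht, void *lookup_key, size_t key_len)
    {
        struct cds_lfht_node *node;

        rcu_read_lock();
        node = cds_lfht_lookup(ht, lookup_key, key_len);
        while (node) {
            struct mystruct *m = caa_container_of(node, struct mystruct, node);

            /* ... use m under RCU protection ... */
            (void) m;
            node = cds_lfht_next(ht, node);
        }
        rcu_read_unlock();
    }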
+
 void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
 {
        struct rcu_table *t;
@@ -688,6 +1060,7 @@ void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
 
        t = rcu_dereference(ht->t);
        (void) _cds_lfht_add(ht, t, node, 0, 0);
+       ht_count_add(ht, t);
 }
 
 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
@@ -695,20 +1068,28 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
 {
        struct rcu_table *t;
        unsigned long hash;
+       struct cds_lfht_node *ret;
 
        hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
        node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
        t = rcu_dereference(ht->t);
-       return _cds_lfht_add(ht, t, node, 1, 0);
+       ret = _cds_lfht_add(ht, t, node, 1, 0);
+       if (ret != node)
+               ht_count_add(ht, t);
+       return ret;
 }
 
 int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
 {
        struct rcu_table *t;
+       int ret;
 
        t = rcu_dereference(ht->t);
-       return _cds_lfht_remove(ht, t, node);
+       ret = _cds_lfht_remove(ht, t, node, 0);
+       if (!ret)
+               ht_count_remove(ht, t);
+       return ret;
 }
 
 static
@@ -721,7 +1102,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
 
        t = ht->t;
        /* Check that the table is empty */
-       lookup = &t->tbl[0][0];
+       lookup = &t->tbl[0]->nodes[0];
        node = (struct cds_lfht_node *) lookup;
        do {
                node = clear_flag(node)->p.next;
@@ -737,10 +1118,10 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
                for (i = 0; i < len; i++) {
                        dbg_printf("delete order %lu i %lu hash %lu\n",
                                order, i,
-                               bit_reverse_ulong(t->tbl[order][i].reverse_hash));
-                       assert(is_dummy(t->tbl[order][i].next));
+                               bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
+                       assert(is_dummy(t->tbl[order]->nodes[i].next));
                }
-               free(t->tbl[order]);
+               poison_free(t->tbl[order]);
        }
        return 0;
 }
@@ -760,8 +1141,9 @@ int cds_lfht_destroy(struct cds_lfht *ht)
        ret = cds_lfht_delete_dummy(ht);
        if (ret)
                return ret;
-       free(ht->t);
-       free(ht);
+       poison_free(ht->t);
+       free_per_cpu_items_count(ht->percpu_count);
+       poison_free(ht);
        return ret;
 }
 
@@ -779,7 +1161,7 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
 
        t = rcu_dereference(ht->t);
        /* Count non-dummy nodes in the table */
-       lookup = &t->tbl[0][0];
+       lookup = &t->tbl[0]->nodes[0];
        node = (struct cds_lfht_node *) lookup;
        do {
                next = rcu_dereference(node->p.next);
@@ -795,42 +1177,87 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
        dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
 }
 
+/* called with resize mutex held */
 static
-void cds_lfht_free_table_cb(struct rcu_head *head)
+void _do_cds_lfht_grow(struct cds_lfht *ht, struct rcu_table *old_t,
+               unsigned long old_size, unsigned long new_size)
 {
-       struct rcu_table *t =
-               caa_container_of(head, struct rcu_table, head);
-       free(t);
+       unsigned long old_order, new_order;
+       struct rcu_table *new_t;
+
+       old_order = get_count_order_ulong(old_size) + 1;
+       new_order = get_count_order_ulong(new_size) + 1;
+       printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+              old_size, old_order, new_size, new_order);
+       new_t = malloc(sizeof(struct cds_lfht)
+                       + (new_order * sizeof(struct rcu_level *)));
+       assert(new_size > old_size);
+       memcpy(&new_t->tbl, &old_t->tbl,
+              old_order * sizeof(struct rcu_level *));
+       init_table(ht, new_t, old_order, new_order - old_order);
+       /* Changing table and size atomically wrt lookups */
+       rcu_assign_pointer(ht->t, new_t);
+       ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
 }
 
 /* called with resize mutex held */
 static
-void _do_cds_lfht_resize(struct cds_lfht *ht)
+void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
+               unsigned long old_size, unsigned long new_size)
 {
-       unsigned long new_size, old_size, old_order, new_order;
-       struct rcu_table *new_t, *old_t;
+       unsigned long old_order, new_order;
+       struct rcu_table *new_t;
 
-       old_t = ht->t;
-       old_size = old_t->size;
+       new_size = max(new_size, MIN_TABLE_SIZE);
        old_order = get_count_order_ulong(old_size) + 1;
-
-       new_size = CMM_LOAD_SHARED(old_t->resize_target);
-       if (old_size == new_size)
-               return;
        new_order = get_count_order_ulong(new_size) + 1;
-       dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+       printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
               old_size, old_order, new_size, new_order);
        new_t = malloc(sizeof(struct cds_lfht)
-                       + (new_order * sizeof(struct _cds_lfht_node *)));
-       assert(new_size > old_size);
+                       + (new_order * sizeof(struct rcu_level *)));
+       assert(new_size < old_size);
        memcpy(&new_t->tbl, &old_t->tbl,
-              old_order * sizeof(struct _cds_lfht_node *));
-       init_table(ht, new_t, old_order, new_order - old_order);
+              new_order * sizeof(struct rcu_level *));
+       new_t->size = !new_order ? 1 : (1UL << (new_order - 1));
+       assert(new_t->size == new_size);
+       new_t->resize_target = new_t->size;
+       new_t->resize_initiated = 0;
+
        /* Changing table and size atomically wrt lookups */
        rcu_assign_pointer(ht->t, new_t);
+
+       /*
+        * We need to wait for all add operations to reach Q.S. (and
+        * thus use the new table for lookups) before we can start
+        * releasing the old dummy nodes. Otherwise, their lookup would
+        * return a logically removed node as the insert position.
+        */
+       ht->cds_lfht_synchronize_rcu();
+
+       /* Unlink and remove all now-unused dummy node pointers. */
+       fini_table(ht, old_t, new_order, old_order - new_order);
        ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
 }
 
+
+/* called with resize mutex held */
+static
+void _do_cds_lfht_resize(struct cds_lfht *ht)
+{
+       unsigned long new_size, old_size;
+       struct rcu_table *old_t;
+
+       old_t = ht->t;
+       old_size = old_t->size;
+       new_size = CMM_LOAD_SHARED(old_t->resize_target);
+       if (old_size < new_size)
+               _do_cds_lfht_grow(ht, old_t, old_size, new_size);
+       else if (old_size > new_size)
+               _do_cds_lfht_shrink(ht, old_t, old_size, new_size);
+       else
+               CMM_STORE_SHARED(old_t->resize_initiated, 0);
+}
+
 static
 unsigned long resize_target_update(struct rcu_table *t,
                                   int growth_order)
@@ -839,26 +1266,23 @@ unsigned long resize_target_update(struct rcu_table *t,
                            t->size << growth_order);
 }
 
-void cds_lfht_resize(struct cds_lfht *ht, int growth)
+static
+void resize_target_update_count(struct rcu_table *t,
+                               unsigned long count)
 {
-       struct rcu_table *t = rcu_dereference(ht->t);
-       unsigned long target_size;
+       count = max(count, MIN_TABLE_SIZE);
+       uatomic_set(&t->resize_target, count);
+}
 
-       if (growth < 0) {
-               /*
-                * Silently refuse to shrink hash table. (not supported)
-                */
-               dbg_printf("shrinking hash table not supported.\n");
-               return;
-       }
+void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
+{
+       struct rcu_table *t = rcu_dereference(ht->t);
 
-       target_size = resize_target_update(t, growth);
-       if (t->size < target_size) {
-               CMM_STORE_SHARED(t->resize_initiated, 1);
-               pthread_mutex_lock(&ht->resize_mutex);
-               _do_cds_lfht_resize(ht);
-               pthread_mutex_unlock(&ht->resize_mutex);
-       }
+       resize_target_update_count(t, new_size);
+       CMM_STORE_SHARED(t->resize_initiated, 1);
+       pthread_mutex_lock(&ht->resize_mutex);
+       _do_cds_lfht_resize(ht);
+       pthread_mutex_unlock(&ht->resize_mutex);
 }
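With this change, cds_lfht_resize() takes an absolute target size rather than a growth order, and _do_cds_lfht_resize() can shrink as well as grow; the target is clamped to MIN_TABLE_SIZE internally and the resize runs synchronously under the resize mutex. A hedged usage sketch (the bulk-load scenario is only an assumption for illustration):

    /*
     * Illustrative only: pre-size the table before a bulk insert, then request
     * a shrink once done. At this stage of the code the target size appears to
     * be used as-is, so passing a power of two is the safe choice; values below
     * the internal minimum (MIN_TABLE_SIZE) are clamped.
     */
    void bulk_load(struct cds_lfht *ht, unsigned long target_buckets)
    {
        cds_lfht_resize(ht, target_buckets);    /* grow up front (power of two) */

        /* ... add entries with cds_lfht_add() / cds_lfht_add_unique() ... */

        cds_lfht_resize(ht, 128);               /* request a shrink back down */
    }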
 
 static
@@ -871,7 +1295,7 @@ void do_resize_cb(struct rcu_head *head)
        pthread_mutex_lock(&ht->resize_mutex);
        _do_cds_lfht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
-       free(work);
+       poison_free(work);
        cmm_smp_mb();   /* finish resize before decrement */
        uatomic_dec(&ht->in_progress_resize);
 }
@@ -892,3 +1316,26 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
                CMM_STORE_SHARED(t->resize_initiated, 1);
        }
 }
+
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+                               unsigned long count)
+{
+       struct rcu_resize_work *work;
+
+       if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+               return;
+       resize_target_update_count(t, count);
+       if (!CMM_LOAD_SHARED(t->resize_initiated)) {
+               uatomic_inc(&ht->in_progress_resize);
+               cmm_smp_mb();   /* increment resize count before calling it */
+               work = malloc(sizeof(*work));
+               work->ht = ht;
+               ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
+               CMM_STORE_SHARED(t->resize_initiated, 1);
+       }
+}
+
+#endif
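For completeness, the updated cds_lfht_new() signature now takes a flags argument (CDS_LFHT_AUTO_RESIZE enables the automatic resizing driven by the split-counters and chain-length checks) plus a synchronize_rcu callback needed by the shrink path. A hedged creation sketch for the default urcu flavour; my_hash_fct and my_compare_fct are application-provided placeholders, not part of the patch.

    /*
     * Illustrative only. Assumes <urcu.h>, <urcu-call-rcu.h> and the rculfhash
     * header are included, and that my_hash_fct()/my_compare_fct() are defined
     * by the application.
     */
    struct cds_lfht *make_table(void)
    {
        return cds_lfht_new(my_hash_fct, my_compare_fct,
                            0x42UL,                  /* hash seed */
                            128,                     /* initial size, power of two */
                            CDS_LFHT_AUTO_RESIZE,    /* grow and shrink automatically */
                            call_rcu,                /* from the urcu flavour in use */
                            synchronize_rcu);
    }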