Merge branch 'master' into urcu/ht-shrink
author    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Wed, 2 Nov 2011 00:15:55 +0000 (20:15 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Wed, 2 Nov 2011 00:15:55 +0000 (20:15 -0400)
Fixed conflicts:
tests/test_urcu_qsbr.c
tests/test_urcu_qsbr_gc.c

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Makefile.am
rculfhash.c [new file with mode: 0644]
tests/Makefile.am
tests/test_urcu_hash.c [new file with mode: 0644]
urcu-qsbr.c
urcu/cds.h
urcu/jhash.h [new file with mode: 0644]
urcu/rculfhash.h [new file with mode: 0644]

index 30c700be9d6abc00cc8e3c57b8ec102d0b5e5e40..4e4ce894b9bc1ee8d1f247a8da8255b526619f59 100644 (file)
@@ -14,7 +14,8 @@ nobase_dist_include_HEADERS = urcu/compiler.h urcu/hlist.h urcu/list.h \
                urcu/uatomic/generic.h urcu/arch/generic.h urcu/wfstack.h \
                urcu/wfqueue.h urcu/rculfstack.h urcu/rculfqueue.h \
                urcu/ref.h urcu/map/*.h urcu/static/*.h urcu/cds.h \
-               urcu/urcu_ref.h urcu/urcu-futex.h urcu/uatomic_arch.h
+               urcu/urcu_ref.h urcu/urcu-futex.h urcu/uatomic_arch.h \
+               urcu/rculfhash.h
 nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic.h urcu/config.h
 
 EXTRA_DIST = $(top_srcdir)/urcu/arch/*.h $(top_srcdir)/urcu/uatomic/*.h \
@@ -61,7 +62,7 @@ liburcu_signal_la_LIBADD = liburcu-common.la
 liburcu_bp_la_SOURCES = urcu-bp.c urcu-pointer.c $(COMPAT)
 liburcu_bp_la_LIBADD = liburcu-common.la
 
-liburcu_cds_la_SOURCES = rculfqueue.c rculfstack.c $(COMPAT)
+liburcu_cds_la_SOURCES = rculfqueue.c rculfstack.c rculfhash.c $(COMPAT)
 liburcu_cds_la_LIBADD = liburcu-common.la
 
 pkgconfigdir = $(libdir)/pkgconfig
diff --git a/rculfhash.c b/rculfhash.c
new file mode 100644 (file)
index 0000000..f4187e7
--- /dev/null
@@ -0,0 +1,1845 @@
+/*
+ * rculfhash.c
+ *
+ * Userspace RCU library - Lock-Free Resizable RCU Hash Table
+ *
+ * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Based on the following articles:
+ * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
+ *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
+ * - Michael, M. M. High performance dynamic lock-free hash tables
+ *   and list-based sets. In Proceedings of the fourteenth annual ACM
+ *   symposium on Parallel algorithms and architectures, ACM Press,
+ *   (2002), 73-82.
+ *
+ * Some specificities of this Lock-Free Resizable RCU Hash Table
+ * implementation:
+ *
+ * - RCU read-side critical sections allow readers to perform hash
+ *   table lookups and use the returned objects safely, by delaying
+ *   memory reclaim by a grace period.
+ * - Add and remove operations are lock-free, and do not need to
+ *   allocate memory. They need to be executed within an RCU read-side
+ *   critical section to ensure the objects they read are valid and to
+ *   deal with the cmpxchg ABA problem.
+ * - add and add_unique operations are supported. add_unique checks if
+ *   the node key already exists in the hash table. It ensures that no
+ *   duplicate key exists.
+ * - The resize operation executes concurrently with add/remove/lookup.
+ * - Hash table nodes are contained within a split-ordered list. This
+ *   list is ordered by increasing reversed-bit hash value.
+ * - An index of dummy nodes is kept. These dummy nodes are the hash
+ *   table "buckets", and they are also chained together in the
+ *   split-ordered list, which allows recursive expansion.
+ * - The resize operation for small tables only allows expanding the hash table.
+ *   It is triggered automatically by detecting long chains in the add
+ *   operation.
+ * - The resize operation for larger tables (also available through the
+ *   API) allows both expanding and shrinking the hash table.
+ * - Split-counters are used to keep track of the number of
+ *   nodes within the hash table for automatic resize triggering.
+ * - A resize operation initiated by long chain detection is executed by
+ *   a call_rcu worker thread, which preserves the lock-freedom of add
+ *   and remove.
+ * - Resize operations are protected by a mutex.
+ * - The removal operation is split in two parts: first, a "removed"
+ *   flag is set in the next pointer within the node to remove. Then,
+ *   a "garbage collection" is performed in the bucket containing the
+ *   removed node (from the start of the bucket up to the removed node).
+ *   All encountered nodes with "removed" flag set in their next
+ *   pointers are removed from the linked-list. If the cmpxchg used for
+ *   removal fails (due to concurrent garbage-collection or concurrent
+ *   add), we retry from the beginning of the bucket. This ensures that
+ *   the node with "removed" flag set is removed from the hash table
+ *   (not visible to lookups anymore) before the RCU read-side critical
+ *   section held across removal ends. Furthermore, this ensures that
+ *   the node with "removed" flag set is removed from the linked-list
+ *   before its memory is reclaimed. Only the thread that successfully
+ *   set the "removed" flag (with a cmpxchg) into a node's next pointer
+ *   is considered to have succeeded in its removal (and thus owns the
+ *   node to reclaim). Because we garbage-collect starting from an
+ *   invariant node (the start-of-bucket dummy node) up to the
+ *   "removed" node (or until we find a higher reverse-hash), we are
+ *   sure that a successful traversal of the chain leads to a chain that
+ *   is present in the linked-list (the start node is never removed) and
+ *   that it does not contain the "removed" node anymore, even if
+ *   concurrent delete/add operations are changing the structure of the
+ *   list concurrently.
+ * - The add operation performs garbage collection of buckets if it
+ *   encounters nodes with the removed flag set in the bucket where it
+ *   wants to add its new node. This ensures lock-freedom of the add
+ *   operation by helping the remover unlink nodes from the list rather
+ *   than waiting for it to do so.
+ * - An RCU "order table" indexed by log2(hash index) is copied and
+ *   expanded by the resize operation. This order table allows finding
+ *   the "dummy node" tables.
+ * - There is one dummy node table per hash index order. The size of
+ *   each dummy node table is half the number of hashes contained in
+ *   this order (except for order 0).
+ * - synchronize_rcu is used to garbage-collect the old dummy node table.
+ * - The per-order dummy node tables contain a compact version of the
+ *   hash table nodes. These tables are invariant after they are
+ *   populated into the hash table.
+ *
+ * Dummy node tables:
+ *
+ * hash table  hash table      the last        all dummy node tables
+ * order       size            dummy node      0   1   2   3   4   5   6(index)
+ *                             table size
+ * 0           1               1               1
+ * 1           2               1               1   1
+ * 2           4               2               1   1   2
+ * 3           8               4               1   1   2   4
+ * 4           16              8               1   1   2   4   8
+ * 5           32              16              1   1   2   4   8  16
+ * 6           64              32              1   1   2   4   8  16  32
+ *
+ * When growing/shrinking, we only focus on the last dummy node table,
+ * whose size is (!order ? 1 : (1 << (order - 1))).
+ *
+ * Example for growing/shrinking:
+ * grow hash table from order 5 to 6: init the index=6 dummy node table
+ * shrink hash table from order 6 to 5: fini the index=6 dummy node table
+ *
+ * A bit of ascii art explanation:
+ *
+ * The order index is off by one compared to the actual power of 2,
+ * because we use index 0 to deal with the 0 special case.
+ *
+ * This shows the nodes for a small table ordered by reversed bits:
+ * 
+ *    bits   reverse
+ * 0  000        000
+ * 4  100        001
+ * 2  010        010
+ * 6  110        011
+ * 1  001        100
+ * 5  101        101
+ * 3  011        110
+ * 7  111        111
+ * 
+ * This shows the nodes in order of non-reversed bits, linked by 
+ * reversed-bit order.
+ * 
+ * order              bits       reverse
+ * 0               0  000        000
+ * 1               |  1  001        100             <-
+ * 2               |  |  2  010        010    <-     |
+ *                 |  |  |  3  011        110  | <-  |
+ * 3               -> |  |  |  4  100        001  |  |
+ *                    -> |  |     5  101        101  |
+ *                       -> |        6  110        011
+ *                          ->          7  111        111
+ */
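+
+/*
+ * Illustrative reader-side usage (a sketch, not part of this file's
+ * code; "ht", "key" and the "mynode" type are hypothetical, and error
+ * handling is omitted). Lookups must be performed within an RCU
+ * read-side critical section, and the returned node remains valid
+ * until that critical section ends:
+ *
+ *     struct mynode {
+ *             struct cds_lfht_node node;
+ *             int value;
+ *     };
+ *
+ *     struct cds_lfht_iter iter;
+ *
+ *     rcu_read_lock();
+ *     cds_lfht_lookup(ht, &key, sizeof(key), &iter);
+ *     if (iter.node) {
+ *             struct mynode *m = caa_container_of(iter.node,
+ *                             struct mynode, node);
+ *             ... use *m safely here ...
+ *     }
+ *     rcu_read_unlock();
+ */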
+
+#define _LGPL_SOURCE
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "config.h"
+#include <urcu.h>
+#include <urcu-call-rcu.h>
+#include <urcu/arch.h>
+#include <urcu/uatomic.h>
+#include <urcu/compiler.h>
+#include <urcu/rculfhash.h>
+#include <unistd.h>    /* sysconf() */
+#include <sched.h>     /* sched_getcpu() */
+#include <pthread.h>
+
+#ifdef DEBUG
+#define dbg_printf(fmt, args...)     printf("[debug rculfhash] " fmt, ## args)
+#else
+#define dbg_printf(fmt, args...)
+#endif
+
+/*
+ * Split-counters lazily update the global counter every 1024
+ * additions/removals. They automatically keep track of whether a
+ * resize is required. We use the bucket length as an indicator of the
+ * need to expand, for small tables and for machines lacking per-cpu
+ * data support.
+ */
+#define COUNT_COMMIT_ORDER             10
+#define DEFAULT_SPLIT_COUNT_MASK       0xFUL
+#define CHAIN_LEN_TARGET               1
+#define CHAIN_LEN_RESIZE_THRESHOLD     3
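+
+/*
+ * Worked example with the defaults above: each split-counter folds its
+ * local count into the global counter every 2^COUNT_COMMIT_ORDER = 1024
+ * add/del operations, and check_resize() below requests an expand once
+ * a bucket chain reaches CHAIN_LEN_RESIZE_THRESHOLD = 3 nodes.
+ */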
+
+/*
+ * Define the minimum table size.
+ */
+#define MIN_TABLE_SIZE                 1
+
+#if (CAA_BITS_PER_LONG == 32)
+#define MAX_TABLE_ORDER                        32
+#else
+#define MAX_TABLE_ORDER                        64
+#endif
+
+/*
+ * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
+ */
+#define MIN_PARTITION_PER_THREAD_ORDER 12
+#define MIN_PARTITION_PER_THREAD       (1UL << MIN_PARTITION_PER_THREAD_ORDER)
+
+#ifndef min
+#define min(a, b)      ((a) < (b) ? (a) : (b))
+#endif
+
+#ifndef max
+#define max(a, b)      ((a) > (b) ? (a) : (b))
+#endif
+
+/*
+ * The removed flag needs to be updated atomically with the pointer.
+ * It indicates that no node must attach to the node scheduled for
+ * removal, and that node garbage collection must be performed.
+ * The dummy flag does not need to be updated atomically with the
+ * pointer, but it is added as a pointer low-bit flag to save space.
+ */
+#define REMOVED_FLAG           (1UL << 0)
+#define DUMMY_FLAG             (1UL << 1)
+#define FLAGS_MASK             ((1UL << 2) - 1)
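+
+/*
+ * For example (illustrative address): a next pointer to a node at
+ * 0x7f4200 is stored as 0x7f4201 once the removed flag is set, and as
+ * 0x7f4202 for a dummy node; clear_flag() below masks FLAGS_MASK off
+ * before the pointer is dereferenced. The two low bits are available
+ * because nodes are at least pointer-aligned.
+ */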
+
+/* Value of the end pointer. Should not interact with flags. */
+#define END_VALUE              NULL
+
+/*
+ * ht_items_count: Split-counters counting the number of node additions
+ * and removals in the table. Only used if the CDS_LFHT_ACCOUNTING flag
+ * is set at hash table creation.
+ *
+ * These are free-running counters, never reset to zero. They count the
+ * number of add/remove operations, and trigger an update of the global
+ * counter every (1 << COUNT_COMMIT_ORDER) operations. We choose a
+ * power-of-2 trigger value so that 32- or 64-bit counter overflow is
+ * handled gracefully.
+ */
+struct ht_items_count {
+       unsigned long add, del;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+/*
+ * rcu_level: Contains the per order-index-level dummy node table. The
+ * size of each dummy node table is half the number of hashes contained
+ * in this order (except for order 0). The minimum allocation size
+ * parameter allows combining the dummy node arrays of the lowermost
+ * levels to improve cache locality for small index orders.
+ */
+struct rcu_level {
+       /* Note: manually update allocation length when adding a field */
+       struct _cds_lfht_node nodes[0];
+};
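+
+/*
+ * For example (a sketch): with min_alloc_size = 4 (min_alloc_order = 2),
+ * t.tbl[0] is allocated with 4 nodes, and t.tbl[1] and t.tbl[2] point
+ * into that same allocation (see cds_lfht_create_dummy()); higher
+ * orders get their own calloc'd dummy node tables.
+ */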
+
+/*
+ * rcu_table: Contains the size and desired new size if a resize
+ * operation is in progress, as well as the statically-sized array of
+ * rcu_level pointers.
+ */
+struct rcu_table {
+       unsigned long size;     /* always a power of 2, shared (RCU) */
+       unsigned long resize_target;
+       int resize_initiated;
+       struct rcu_level *tbl[MAX_TABLE_ORDER];
+};
+
+/*
+ * cds_lfht: Top-level data structure representing a lock-free hash
+ * table. Defined in the implementation file to make it an opaque
+ * cookie to users.
+ */
+struct cds_lfht {
+       struct rcu_table t;
+       cds_lfht_hash_fct hash_fct;
+       cds_lfht_compare_fct compare_fct;
+       unsigned long min_alloc_order;
+       unsigned long min_alloc_size;
+       unsigned long hash_seed;
+       int flags;
+       /*
+        * We need to put the work threads offline (QSBR) when taking this
+        * mutex, because we use synchronize_rcu within this mutex critical
+        * section, which waits on read-side critical sections, and could
+        * therefore cause grace-period deadlock if we hold off RCU G.P.
+        * completion.
+        */
+       pthread_mutex_t resize_mutex;   /* resize mutex: add/del mutex */
+       unsigned int in_progress_resize, in_progress_destroy;
+       void (*cds_lfht_call_rcu)(struct rcu_head *head,
+                     void (*func)(struct rcu_head *head));
+       void (*cds_lfht_synchronize_rcu)(void);
+       void (*cds_lfht_rcu_read_lock)(void);
+       void (*cds_lfht_rcu_read_unlock)(void);
+       void (*cds_lfht_rcu_thread_offline)(void);
+       void (*cds_lfht_rcu_thread_online)(void);
+       void (*cds_lfht_rcu_register_thread)(void);
+       void (*cds_lfht_rcu_unregister_thread)(void);
+       pthread_attr_t *resize_attr;    /* Resize threads attributes */
+       long count;                     /* global approximate item count */
+       struct ht_items_count *split_count;     /* split item count */
+};
+
+/*
+ * rcu_resize_work: Contains arguments passed to RCU worker thread
+ * responsible for performing lazy resize.
+ */
+struct rcu_resize_work {
+       struct rcu_head head;
+       struct cds_lfht *ht;
+};
+
+/*
+ * partition_resize_work: Contains arguments passed to worker threads
+ * executing the hash table resize on partitions of the hash table
+ * assigned to each processor's worker thread.
+ */
+struct partition_resize_work {
+       pthread_t thread_id;
+       struct cds_lfht *ht;
+       unsigned long i, start, len;
+       void (*fct)(struct cds_lfht *ht, unsigned long i,
+                   unsigned long start, unsigned long len);
+};
+
+static
+void _cds_lfht_add(struct cds_lfht *ht,
+               unsigned long size,
+               struct cds_lfht_node *node,
+               struct cds_lfht_iter *unique_ret,
+               int dummy);
+
+/*
+ * Algorithm to reverse bits in a word by lookup table, extended to
+ * 64-bit words.
+ * Source:
+ * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
+ * Originally in the public domain.
+ */
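+
+/*
+ * For example, bit_reverse_u8(0x01) == 0x80, and on a 64-bit build
+ * bit_reverse_ulong(1UL) == 1UL << 63.
+ */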
+
+static const uint8_t BitReverseTable256[256] = 
+{
+#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
+#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
+#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
+       R6(0), R6(2), R6(1), R6(3)
+};
+#undef R2
+#undef R4
+#undef R6
+
+static
+uint8_t bit_reverse_u8(uint8_t v)
+{
+       return BitReverseTable256[v];
+}
+
+static __attribute__((unused))
+uint32_t bit_reverse_u32(uint32_t v)
+{
+       return ((uint32_t) bit_reverse_u8(v) << 24) | 
+               ((uint32_t) bit_reverse_u8(v >> 8) << 16) | 
+               ((uint32_t) bit_reverse_u8(v >> 16) << 8) | 
+               ((uint32_t) bit_reverse_u8(v >> 24));
+}
+
+static __attribute__((unused))
+uint64_t bit_reverse_u64(uint64_t v)
+{
+       return ((uint64_t) bit_reverse_u8(v) << 56) | 
+               ((uint64_t) bit_reverse_u8(v >> 8)  << 48) | 
+               ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
+               ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
+               ((uint64_t) bit_reverse_u8(v >> 32) << 24) | 
+               ((uint64_t) bit_reverse_u8(v >> 40) << 16) | 
+               ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
+               ((uint64_t) bit_reverse_u8(v >> 56));
+}
+
+static
+unsigned long bit_reverse_ulong(unsigned long v)
+{
+#if (CAA_BITS_PER_LONG == 32)
+       return bit_reverse_u32(v);
+#else
+       return bit_reverse_u64(v);
+#endif
+}
+
+/*
+ * fls: returns the position of the most significant bit.
+ * Returns 0 if no bit is set, else returns the position of the most
+ * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
+ */
+#if defined(__i386) || defined(__x86_64)
+static inline
+unsigned int fls_u32(uint32_t x)
+{
+       int r;
+
+       asm("bsrl %1,%0\n\t"
+           "jnz 1f\n\t"
+           "movl $-1,%0\n\t"
+           "1:\n\t"
+           : "=r" (r) : "rm" (x));
+       return r + 1;
+}
+#define HAS_FLS_U32
+#endif
+
+#if defined(__x86_64)
+static inline
+unsigned int fls_u64(uint64_t x)
+{
+       long r;
+
+       asm("bsrq %1,%0\n\t"
+           "jnz 1f\n\t"
+           "movq $-1,%0\n\t"
+           "1:\n\t"
+           : "=r" (r) : "rm" (x));
+       return r + 1;
+}
+#define HAS_FLS_U64
+#endif
+
+#ifndef HAS_FLS_U64
+static __attribute__((unused))
+unsigned int fls_u64(uint64_t x)
+{
+       unsigned int r = 64;
+
+       if (!x)
+               return 0;
+
+       if (!(x & 0xFFFFFFFF00000000ULL)) {
+               x <<= 32;
+               r -= 32;
+       }
+       if (!(x & 0xFFFF000000000000ULL)) {
+               x <<= 16;
+               r -= 16;
+       }
+       if (!(x & 0xFF00000000000000ULL)) {
+               x <<= 8;
+               r -= 8;
+       }
+       if (!(x & 0xF000000000000000ULL)) {
+               x <<= 4;
+               r -= 4;
+       }
+       if (!(x & 0xC000000000000000ULL)) {
+               x <<= 2;
+               r -= 2;
+       }
+       if (!(x & 0x8000000000000000ULL)) {
+               x <<= 1;
+               r -= 1;
+       }
+       return r;
+}
+#endif
+
+#ifndef HAS_FLS_U32
+static __attribute__((unused))
+unsigned int fls_u32(uint32_t x)
+{
+       unsigned int r = 32;
+
+       if (!x)
+               return 0;
+       if (!(x & 0xFFFF0000U)) {
+               x <<= 16;
+               r -= 16;
+       }
+       if (!(x & 0xFF000000U)) {
+               x <<= 8;
+               r -= 8;
+       }
+       if (!(x & 0xF0000000U)) {
+               x <<= 4;
+               r -= 4;
+       }
+       if (!(x & 0xC0000000U)) {
+               x <<= 2;
+               r -= 2;
+       }
+       if (!(x & 0x80000000U)) {
+               x <<= 1;
+               r -= 1;
+       }
+       return r;
+}
+#endif
+
+unsigned int fls_ulong(unsigned long x)
+{
+#if (CAA_BITS_PER_LONG == 32)
+       return fls_u32(x);
+#else
+       return fls_u64(x);
+#endif
+}
+
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
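+ * For example, get_count_order_u32(5) == 3, since 5 <= (1UL << 3)
+ * but 5 > (1UL << 2).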
+ */
+int get_count_order_u32(uint32_t x)
+{
+       if (!x)
+               return -1;
+
+       return fls_u32(x - 1);
+}
+
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
+int get_count_order_ulong(unsigned long x)
+{
+       if (!x)
+               return -1;
+
+       return fls_ulong(x - 1);
+}
+
+#ifdef POISON_FREE
+#define poison_free(ptr)                                       \
+       do {                                                    \
+               if (ptr) {                                      \
+                       memset(ptr, 0x42, sizeof(*(ptr)));      \
+                       free(ptr);                              \
+               }                                               \
+       } while (0)
+#else
+#define poison_free(ptr)       free(ptr)
+#endif
+
+static
+void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
+                               unsigned long count);
+
+static long nr_cpus_mask = -1;
+static long split_count_mask = -1;
+
+#if defined(HAVE_SYSCONF)
+static void ht_init_nr_cpus_mask(void)
+{
+       long maxcpus;
+
+       maxcpus = sysconf(_SC_NPROCESSORS_CONF);
+       if (maxcpus <= 0) {
+               nr_cpus_mask = -2;
+               return;
+       }
+       /*
+        * round up number of CPUs to next power of two, so we
+        * can use & for modulo.
+        */
+       maxcpus = 1UL << get_count_order_ulong(maxcpus);
+       nr_cpus_mask = maxcpus - 1;
+}
+#else /* #if defined(HAVE_SYSCONF) */
+static void ht_init_nr_cpus_mask(void)
+{
+       nr_cpus_mask = -2;
+}
+#endif /* #else #if defined(HAVE_SYSCONF) */
+
+static
+void alloc_split_items_count(struct cds_lfht *ht)
+{
+       struct ht_items_count *count;
+
+       if (nr_cpus_mask == -1) {
+               ht_init_nr_cpus_mask();
+               if (nr_cpus_mask < 0)
+                       split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
+               else
+                       split_count_mask = nr_cpus_mask;
+       }
+
+       assert(split_count_mask >= 0);
+
+       if (ht->flags & CDS_LFHT_ACCOUNTING) {
+               ht->split_count = calloc(split_count_mask + 1, sizeof(*count));
+               assert(ht->split_count);
+       } else {
+               ht->split_count = NULL;
+       }
+}
+
+static
+void free_split_items_count(struct cds_lfht *ht)
+{
+       poison_free(ht->split_count);
+}
+
+#if defined(HAVE_SCHED_GETCPU)
+static
+int ht_get_split_count_index(unsigned long hash)
+{
+       int cpu;
+
+       assert(split_count_mask >= 0);
+       cpu = sched_getcpu();
+       if (unlikely(cpu < 0))
+               return hash & split_count_mask;
+       else
+               return cpu & split_count_mask;
+}
+#else /* #if defined(HAVE_SCHED_GETCPU) */
+static
+int ht_get_split_count_index(unsigned long hash)
+{
+       return hash & split_count_mask;
+}
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) */
+
+static
+void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
+{
+       unsigned long split_count;
+       int index;
+
+       if (unlikely(!ht->split_count))
+               return;
+       index = ht_get_split_count_index(hash);
+       split_count = uatomic_add_return(&ht->split_count[index].add, 1);
+       if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+               long count;
+
+               dbg_printf("add split count %lu\n", split_count);
+               count = uatomic_add_return(&ht->count,
+                                          1UL << COUNT_COMMIT_ORDER);
+               /* If power of 2 */
+               if (!(count & (count - 1))) {
+                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
+                               return;
+                       dbg_printf("add set global %ld\n", count);
+                       cds_lfht_resize_lazy_count(ht, size,
+                               count >> (CHAIN_LEN_TARGET - 1));
+               }
+       }
+}
+
+static
+void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
+{
+       unsigned long split_count;
+       int index;
+
+       if (unlikely(!ht->split_count))
+               return;
+       index = ht_get_split_count_index(hash);
+       split_count = uatomic_add_return(&ht->split_count[index].del, 1);
+       if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+               long count;
+
+               dbg_printf("del split count %lu\n", split_count);
+               count = uatomic_add_return(&ht->count,
+                                          -(1UL << COUNT_COMMIT_ORDER));
+               /* If power of 2 */
+               if (!(count & (count - 1))) {
+                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
+                               return;
+                       dbg_printf("del set global %ld\n", count);
+                       /*
+                        * Don't shrink table if the number of nodes is below a
+                        * certain threshold.
+                        */
+                       if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
+                               return;
+                       cds_lfht_resize_lazy_count(ht, size,
+                               count >> (CHAIN_LEN_TARGET - 1));
+               }
+       }
+}
+
+static
+void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
+{
+       unsigned long count;
+
+       if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+               return;
+       count = uatomic_read(&ht->count);
+       /*
+        * Use bucket-local length for small table expand and for
+        * environments lacking per-cpu data support.
+        */
+       if (count >= (1UL << COUNT_COMMIT_ORDER))
+               return;
+       if (chain_len > 100)
+               dbg_printf("WARNING: large chain length: %u.\n",
+                          chain_len);
+       if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
+               cds_lfht_resize_lazy(ht, size,
+                       get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+}
+
+static
+struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
+{
+       return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
+}
+
+static
+int is_removed(struct cds_lfht_node *node)
+{
+       return ((unsigned long) node) & REMOVED_FLAG;
+}
+
+static
+struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
+{
+       return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
+}
+
+static
+int is_dummy(struct cds_lfht_node *node)
+{
+       return ((unsigned long) node) & DUMMY_FLAG;
+}
+
+static
+struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
+{
+       return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
+}
+
+static
+struct cds_lfht_node *get_end(void)
+{
+       return (struct cds_lfht_node *) END_VALUE;
+}
+
+static
+int is_end(struct cds_lfht_node *node)
+{
+       return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
+}
+
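+/*
+ * _uatomic_max: set *ptr to v if v is greater than the current value of
+ * *ptr, using a cmpxchg retry loop. Returns the maximum of the observed
+ * value and v.
+ */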
+static
+unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
+{
+       unsigned long old1, old2;
+
+       old1 = uatomic_read(ptr);
+       do {
+               old2 = old1;
+               if (old2 >= v)
+                       return old2;
+       } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
+       return v;
+}
+
+static
+struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
+               unsigned long hash)
+{
+       unsigned long index, order;
+
+       assert(size > 0);
+       index = hash & (size - 1);
+
+       if (index < ht->min_alloc_size) {
+               dbg_printf("lookup hash %lu index %lu order 0 aridx 0\n",
+                          hash, index);
+               return &ht->t.tbl[0]->nodes[index];
+       }
+       /*
+        * Equivalent to get_count_order_ulong(index + 1), but optimizes
+        * away the 0 special case of get_count_order_ulong, which cannot
+        * occur here since index + 1 is never 0.
+        */
+       order = fls_ulong(index);
+       dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
+                  hash, index, order, index & ((1UL << (order - 1)) - 1));
+       return &ht->t.tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+}
+
+/*
+ * Remove all logically deleted nodes from a bucket up to a certain node key.
+ */
+static
+void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
+{
+       struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
+
+       assert(!is_dummy(dummy));
+       assert(!is_removed(dummy));
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
+       for (;;) {
+               iter_prev = dummy;
+               /* We can always skip the dummy node initially */
+               iter = rcu_dereference(iter_prev->p.next);
+               assert(!is_removed(iter));
+               assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+               /*
+                * We should never be called with the dummy node (start
+                * of chain) and the logically removed node (end of path
+                * compression marker) being the same node. That would be
+                * a bug in the algorithm implementation.
+                */
+               assert(dummy != node);
+               for (;;) {
+                       if (unlikely(is_end(iter)))
+                               return;
+                       if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+                               return;
+                       next = rcu_dereference(clear_flag(iter)->p.next);
+                       if (unlikely(is_removed(next)))
+                               break;
+                       iter_prev = clear_flag(iter);
+                       iter = next;
+               }
+               assert(!is_removed(iter));
+               if (is_dummy(iter))
+                       new_next = flag_dummy(clear_flag(next));
+               else
+                       new_next = clear_flag(next);
+               (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+       }
+       return;
+}
+
+static
+int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
+               struct cds_lfht_node *old_node,
+               struct cds_lfht_node *old_next,
+               struct cds_lfht_node *new_node)
+{
+       struct cds_lfht_node *dummy, *ret_next;
+       struct _cds_lfht_node *lookup;
+
+       if (!old_node)  /* Return -ENOENT if asked to replace NULL node */
+               return -ENOENT;
+
+       assert(!is_removed(old_node));
+       assert(!is_dummy(old_node));
+       assert(!is_removed(new_node));
+       assert(!is_dummy(new_node));
+       assert(new_node != old_node);
+       for (;;) {
+               /* Insert after node to be replaced */
+               if (is_removed(old_next)) {
+                       /*
+                        * Too late, the old node has been removed under us
+                        * between lookup and replace. Fail.
+                        */
+                       return -ENOENT;
+               }
+               assert(!is_dummy(old_next));
+               assert(new_node != clear_flag(old_next));
+               new_node->p.next = clear_flag(old_next);
+               /*
+                * Here is the whole trick for lock-free replace: we add
+                * the replacement node _after_ the node we want to
+                * replace by atomically setting its next pointer at the
+                * same time we set its removal flag. Given that
+                * the lookups/get next use an iterator aware of the
+                * next pointer, they will either skip the old node due
+                * to the removal flag and see the new node, or use
+                * the old node, but will not see the new one.
+                */
+               ret_next = uatomic_cmpxchg(&old_node->p.next,
+                             old_next, flag_removed(new_node));
+               if (ret_next == old_next)
+                       break;          /* We performed the replacement. */
+               old_next = ret_next;
+       }
+
+       /*
+        * Ensure that the old node is not visible to readers anymore:
+        * lookup for the node, and remove it (along with any other
+        * logically removed node) if found.
+        */
+       lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
+       dummy = (struct cds_lfht_node *) lookup;
+       _cds_lfht_gc_bucket(dummy, new_node);
+
+       assert(is_removed(rcu_dereference(old_node->p.next)));
+       return 0;
+}
+
+/*
+ * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
+ * mode. A NULL unique_ret allows creation of duplicate keys.
+ */
+static
+void _cds_lfht_add(struct cds_lfht *ht,
+               unsigned long size,
+               struct cds_lfht_node *node,
+               struct cds_lfht_iter *unique_ret,
+               int dummy)
+{
+       struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
+                       *return_node;
+       struct _cds_lfht_node *lookup;
+
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
+       lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
+       for (;;) {
+               uint32_t chain_len = 0;
+
+               /*
+                * iter_prev points to the non-removed node prior to the
+                * insert location.
+                */
+               iter_prev = (struct cds_lfht_node *) lookup;
+               /* We can always skip the dummy node initially */
+               iter = rcu_dereference(iter_prev->p.next);
+               assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+               for (;;) {
+                       if (unlikely(is_end(iter)))
+                               goto insert;
+                       if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+                               goto insert;
+
+                       /* dummy node is the first node of the identical-hash-value chain */
+                       if (dummy && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash)
+                               goto insert;
+
+                       next = rcu_dereference(clear_flag(iter)->p.next);
+                       if (unlikely(is_removed(next)))
+                               goto gc_node;
+
+                       /* uniquely add */
+                       if (unique_ret
+                           && !is_dummy(next)
+                           && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash) {
+                               struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
+
+                               /*
+                                * uniquely adding inserts the node as the first
+                                * node of the identical-hash-value node chain.
+                                *
+                                * This semantic ensures that no duplicate
+                                * key should ever be observable in the
+                                * table (including when observing the
+                                * table node by node through forward
+                                * iteration).
+                                */
+                               cds_lfht_next_duplicate(ht, &d_iter);
+                               if (!d_iter.node)
+                                       goto insert;
+
+                               *unique_ret = d_iter;
+                               return;
+                       }
+
+                       /* Only account for identical reverse hash once */
+                       if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
+                           && !is_dummy(next))
+                               check_resize(ht, size, ++chain_len);
+                       iter_prev = clear_flag(iter);
+                       iter = next;
+               }
+
+       insert:
+               assert(node != clear_flag(iter));
+               assert(!is_removed(iter_prev));
+               assert(!is_removed(iter));
+               assert(iter_prev != node);
+               if (!dummy)
+                       node->p.next = clear_flag(iter);
+               else
+                       node->p.next = flag_dummy(clear_flag(iter));
+               if (is_dummy(iter))
+                       new_node = flag_dummy(node);
+               else
+                       new_node = node;
+               if (uatomic_cmpxchg(&iter_prev->p.next, iter,
+                                   new_node) != iter) {
+                       continue;       /* retry */
+               } else {
+                       return_node = node;
+                       goto end;
+               }
+
+       gc_node:
+               assert(!is_removed(iter));
+               if (is_dummy(iter))
+                       new_next = flag_dummy(clear_flag(next));
+               else
+                       new_next = clear_flag(next);
+               (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+               /* retry */
+       }
+end:
+       if (unique_ret) {
+               unique_ret->node = return_node;
+               /* unique_ret->next left unset, never used. */
+       }
+}
+
+static
+int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
+               struct cds_lfht_node *node,
+               int dummy_removal)
+{
+       struct cds_lfht_node *dummy, *next, *old;
+       struct _cds_lfht_node *lookup;
+
+       if (!node)      /* Return -ENOENT if asked to delete NULL node */
+               return -ENOENT;
+
+       /* logically delete the node */
+       assert(!is_dummy(node));
+       assert(!is_removed(node));
+       old = rcu_dereference(node->p.next);
+       do {
+               struct cds_lfht_node *new_next;
+
+               next = old;
+               if (unlikely(is_removed(next)))
+                       return -ENOENT;
+               if (dummy_removal)
+                       assert(is_dummy(next));
+               else
+                       assert(!is_dummy(next));
+               new_next = flag_removed(next);
+               old = uatomic_cmpxchg(&node->p.next, next, new_next);
+       } while (old != next);
+       /* We performed the (logical) deletion. */
+
+       /*
+        * Ensure that the node is not visible to readers anymore: lookup for
+        * the node, and remove it (along with any other logically removed node)
+        * if found.
+        */
+       lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
+       dummy = (struct cds_lfht_node *) lookup;
+       _cds_lfht_gc_bucket(dummy, node);
+
+       assert(is_removed(rcu_dereference(node->p.next)));
+       return 0;
+}
+
+static
+void *partition_resize_thread(void *arg)
+{
+       struct partition_resize_work *work = arg;
+
+       work->ht->cds_lfht_rcu_register_thread();
+       work->fct(work->ht, work->i, work->start, work->len);
+       work->ht->cds_lfht_rcu_unregister_thread();
+       return NULL;
+}
+
+static
+void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
+               unsigned long len,
+               void (*fct)(struct cds_lfht *ht, unsigned long i,
+                       unsigned long start, unsigned long len))
+{
+       unsigned long partition_len;
+       struct partition_resize_work *work;
+       int thread, ret;
+       unsigned long nr_threads;
+
+       /*
+        * Note: nr_cpus_mask + 1 is always a power of 2.
+        * We spawn just the number of threads we need to satisfy the minimum
+        * partition size, up to the number of CPUs in the system.
+        */
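+       /*
+        * For example (illustrative numbers): with 8 CPUs
+        * (nr_cpus_mask == 7) and len == 1UL << 20, nr_threads ==
+        * min(8, 1UL << 8) == 8, and each thread gets partition_len ==
+        * 1UL << 17 dummy nodes to process.
+        */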
+       if (nr_cpus_mask > 0) {
+               nr_threads = min(nr_cpus_mask + 1,
+                                len >> MIN_PARTITION_PER_THREAD_ORDER);
+       } else {
+               nr_threads = 1;
+       }
+       partition_len = len >> get_count_order_ulong(nr_threads);
+       work = calloc(nr_threads, sizeof(*work));
+       assert(work);
+       for (thread = 0; thread < nr_threads; thread++) {
+               work[thread].ht = ht;
+               work[thread].i = i;
+               work[thread].len = partition_len;
+               work[thread].start = thread * partition_len;
+               work[thread].fct = fct;
+               ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
+                       partition_resize_thread, &work[thread]);
+               assert(!ret);
+       }
+       for (thread = 0; thread < nr_threads; thread++) {
+               ret = pthread_join(work[thread].thread_id, NULL);
+               assert(!ret);
+       }
+       free(work);
+}
+
+/*
+ * Holding RCU read lock to protect _cds_lfht_add against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ *
+ * When we reach a certain length, we can split this population phase over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should therefore prevent the expand operation from lagging behind
+ * too many concurrent insertion threads, by relying on the scheduler's
+ * ability to schedule dummy node population fairly with insertions.
+ */
+static
+void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
+                                  unsigned long start, unsigned long len)
+{
+       unsigned long j;
+
+       assert(i > ht->min_alloc_order);
+       ht->cds_lfht_rcu_read_lock();
+       for (j = start; j < start + len; j++) {
+               struct cds_lfht_node *new_node =
+                       (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+
+               dbg_printf("init populate: i %lu j %lu hash %lu\n",
+                          i, j, (1UL << (i - 1)) + j);
+               new_node->p.reverse_hash =
+                               bit_reverse_ulong((1UL << (i - 1)) + j);
+               _cds_lfht_add(ht, 1UL << (i - 1),
+                               new_node, NULL, 1);
+       }
+       ht->cds_lfht_rcu_read_unlock();
+}
+
+static
+void init_table_populate(struct cds_lfht *ht, unsigned long i,
+                        unsigned long len)
+{
+       assert(nr_cpus_mask != -1);
+       if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
+               ht->cds_lfht_rcu_thread_online();
+               init_table_populate_partition(ht, i, 0, len);
+               ht->cds_lfht_rcu_thread_offline();
+               return;
+       }
+       partition_resize_helper(ht, i, len, init_table_populate_partition);
+}
+
+static
+void init_table(struct cds_lfht *ht,
+               unsigned long first_order, unsigned long last_order)
+{
+       unsigned long i;
+
+       dbg_printf("init table: first_order %lu last_order %lu\n",
+                  first_order, last_order);
+       assert(first_order > ht->min_alloc_order);
+       for (i = first_order; i <= last_order; i++) {
+               unsigned long len;
+
+               len = 1UL << (i - 1);
+               dbg_printf("init order %lu len: %lu\n", i, len);
+
+               /* Stop expand if the resize target changes under us */
+               if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i))
+                       break;
+
+               ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
+               assert(ht->t.tbl[i]);
+
+               /*
+                * Set all dummy nodes reverse hash values for a level and
+                * link all dummy nodes into the table.
+                */
+               init_table_populate(ht, i, len);
+
+               /*
+                * Update table size.
+                */
+               cmm_smp_wmb();  /* populate data before RCU size */
+               CMM_STORE_SHARED(ht->t.size, 1UL << i);
+
+               dbg_printf("init new size: %lu\n", 1UL << i);
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
+       }
+}
+
+/*
+ * Holding RCU read lock to protect _cds_lfht_remove against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ * For a single level, we logically remove and garbage collect each node.
+ *
+ * As a design choice, we perform logical removal and garbage collection on a
+ * node-per-node basis to simplify this algorithm. We also assume that
+ * keeping good cache locality of the operation would outweigh the
+ * possible performance gain
+ * that could be achieved by batching garbage collection for multiple levels.
+ * However, this would have to be justified by benchmarks.
+ *
+ * Concurrent removal and add operations are helping us perform garbage
+ * collection of logically removed nodes. We guarantee that all logically
+ * removed nodes have been garbage-collected (unlinked) before call_rcu is
+ * invoked to free a whole level of dummy nodes (after a grace period).
+ *
+ * Logical removal and garbage collection can therefore be done in batch or on a
+ * node-per-node basis, as long as the guarantee above holds.
+ *
+ * When we reach a certain length, we can split this removal over many worker
+ * threads, based on the number of CPUs available in the system. This should
+ * prevent the resize process from lagging behind too many concurrent
+ * updater threads actively inserting into the hash table.
+ */
+static
+void remove_table_partition(struct cds_lfht *ht, unsigned long i,
+                           unsigned long start, unsigned long len)
+{
+       unsigned long j;
+
+       assert(i > ht->min_alloc_order);
+       ht->cds_lfht_rcu_read_lock();
+       for (j = start; j < start + len; j++) {
+               struct cds_lfht_node *fini_node =
+                       (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+
+               dbg_printf("remove entry: i %lu j %lu hash %lu\n",
+                          i, j, (1UL << (i - 1)) + j);
+               fini_node->p.reverse_hash =
+                       bit_reverse_ulong((1UL << (i - 1)) + j);
+               (void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
+       }
+       ht->cds_lfht_rcu_read_unlock();
+}
+
+static
+void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
+{
+
+       assert(nr_cpus_mask != -1);
+       if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
+               ht->cds_lfht_rcu_thread_online();
+               remove_table_partition(ht, i, 0, len);
+               ht->cds_lfht_rcu_thread_offline();
+               return;
+       }
+       partition_resize_helper(ht, i, len, remove_table_partition);
+}
+
+static
+void fini_table(struct cds_lfht *ht,
+               unsigned long first_order, unsigned long last_order)
+{
+       long i;
+       void *free_by_rcu = NULL;
+
+       dbg_printf("fini table: first_order %lu last_order %lu\n",
+                  first_order, last_order);
+       assert(first_order > ht->min_alloc_order);
+       for (i = last_order; i >= first_order; i--) {
+               unsigned long len;
+
+               len = 1UL << (i - 1);
+               dbg_printf("fini order %lu len: %lu\n", i, len);
+
+               /* Stop shrink if the resize target changes under us */
+               if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
+                       break;
+
+               cmm_smp_wmb();  /* populate data before RCU size */
+               CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
+
+               /*
+                * We need to wait for all add operations to reach Q.S. (and
+                * thus use the new table for lookups) before we can start
+                * releasing the old dummy nodes. Otherwise their lookup will
+                * return a logically removed node as insert position.
+                */
+               ht->cds_lfht_synchronize_rcu();
+               if (free_by_rcu)
+                       free(free_by_rcu);
+
+               /*
+                * Set "removed" flag in dummy nodes about to be removed.
+                * Unlink all now-logically-removed dummy node pointers.
+                * Concurrent add/remove operation are helping us doing
+                * the gc.
+                */
+               remove_table(ht, i, len);
+
+               free_by_rcu = ht->t.tbl[i];
+
+               dbg_printf("fini new size: %lu\n", 1UL << i);
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
+       }
+
+       if (free_by_rcu) {
+               ht->cds_lfht_synchronize_rcu();
+               free(free_by_rcu);
+       }
+}
+
+static
+void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
+{
+       struct _cds_lfht_node *prev, *node;
+       unsigned long order, len, i, j;
+
+       ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct _cds_lfht_node));
+       assert(ht->t.tbl[0]);
+
+       dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0, 0, 0);
+       ht->t.tbl[0]->nodes[0].next = flag_dummy(get_end());
+       ht->t.tbl[0]->nodes[0].reverse_hash = 0;
+
+       for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
+               len = 1UL << (order - 1);
+               if (order <= ht->min_alloc_order) {
+                       ht->t.tbl[order] = (struct rcu_level *) (ht->t.tbl[0]->nodes + len);
+               } else {
+                       ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
+                       assert(ht->t.tbl[order]);
+               }
+
+               i = 0;
+               prev = ht->t.tbl[i]->nodes;
+               for (j = 0; j < len; j++) {
+                       if (j & (j - 1)) {      /* Between power of 2 */
+                               prev++;
+                       } else if (j) {         /* At each power of 2 */
+                               i++;
+                               prev = ht->t.tbl[i]->nodes;
+                       }
+
+                       node = &ht->t.tbl[order]->nodes[j];
+                       dbg_printf("create dummy: order %lu index %lu hash %lu\n",
+                                  order, j, j + len);
+                       node->next = prev->next;
+                       assert(is_dummy(node->next));
+                       node->reverse_hash = bit_reverse_ulong(j + len);
+                       prev->next = flag_dummy((struct cds_lfht_node *)node);
+               }
+       }
+}
+
+struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
+                       cds_lfht_compare_fct compare_fct,
+                       unsigned long hash_seed,
+                       unsigned long init_size,
+                       unsigned long min_alloc_size,
+                       int flags,
+                       void (*cds_lfht_call_rcu)(struct rcu_head *head,
+                                       void (*func)(struct rcu_head *head)),
+                       void (*cds_lfht_synchronize_rcu)(void),
+                       void (*cds_lfht_rcu_read_lock)(void),
+                       void (*cds_lfht_rcu_read_unlock)(void),
+                       void (*cds_lfht_rcu_thread_offline)(void),
+                       void (*cds_lfht_rcu_thread_online)(void),
+                       void (*cds_lfht_rcu_register_thread)(void),
+                       void (*cds_lfht_rcu_unregister_thread)(void),
+                       pthread_attr_t *attr)
+{
+       struct cds_lfht *ht;
+       unsigned long order;
+
+       /* min_alloc_size must be a power of two */
+       if (!min_alloc_size || (min_alloc_size & (min_alloc_size - 1)))
+               return NULL;
+       /* init_size must be a power of two */
+       if (!init_size || (init_size & (init_size - 1)))
+               return NULL;
+       min_alloc_size = max(min_alloc_size, MIN_TABLE_SIZE);
+       init_size = max(init_size, min_alloc_size);
+       ht = calloc(1, sizeof(struct cds_lfht));
+       assert(ht);
+       ht->flags = flags;
+       ht->hash_fct = hash_fct;
+       ht->compare_fct = compare_fct;
+       ht->hash_seed = hash_seed;
+       ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
+       ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
+       ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
+       ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
+       ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
+       ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
+       ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
+       ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
+       ht->resize_attr = attr;
+       alloc_split_items_count(ht);
+       /* this mutex should not nest in read-side C.S. */
+       pthread_mutex_init(&ht->resize_mutex, NULL);
+       order = get_count_order_ulong(init_size);
+       ht->t.resize_target = 1UL << order;
+       ht->min_alloc_size = min_alloc_size;
+       ht->min_alloc_order = get_count_order_ulong(min_alloc_size);
+       cds_lfht_create_dummy(ht, 1UL << order);
+       ht->t.size = 1UL << order;
+       return ht;
+}
+
+void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
+               struct cds_lfht_iter *iter)
+{
+       struct cds_lfht_node *node, *next, *dummy_node;
+       struct _cds_lfht_node *lookup;
+       unsigned long hash, reverse_hash, size;
+
+       hash = ht->hash_fct(key, key_len, ht->hash_seed);
+       reverse_hash = bit_reverse_ulong(hash);
+
+       size = rcu_dereference(ht->t.size);
+       lookup = lookup_bucket(ht, size, hash);
+       dummy_node = (struct cds_lfht_node *) lookup;
+       /* We can always skip the dummy node initially */
+       node = rcu_dereference(dummy_node->p.next);
+       node = clear_flag(node);
+       for (;;) {
+               if (unlikely(is_end(node))) {
+                       node = next = NULL;
+                       break;
+               }
+               if (unlikely(node->p.reverse_hash > reverse_hash)) {
+                       node = next = NULL;
+                       break;
+               }
+               next = rcu_dereference(node->p.next);
+               assert(node == clear_flag(node));
+               if (likely(!is_removed(next))
+                   && !is_dummy(next)
+                   && node->p.reverse_hash == reverse_hash
+                   && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+                               break;
+               }
+               node = clear_flag(next);
+       }
+       assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+       iter->node = node;
+       iter->next = next;
+}
+
+void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+       struct cds_lfht_node *node, *next;
+       unsigned long reverse_hash;
+       void *key;
+       size_t key_len;
+
+       node = iter->node;
+       reverse_hash = node->p.reverse_hash;
+       key = node->key;
+       key_len = node->key_len;
+       next = iter->next;
+       node = clear_flag(next);
+
+       for (;;) {
+               if (unlikely(is_end(node))) {
+                       node = next = NULL;
+                       break;
+               }
+               if (unlikely(node->p.reverse_hash > reverse_hash)) {
+                       node = next = NULL;
+                       break;
+               }
+               next = rcu_dereference(node->p.next);
+               if (likely(!is_removed(next))
+                   && !is_dummy(next)
+                   && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+                               break;
+               }
+               node = clear_flag(next);
+       }
+       assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+       iter->node = node;
+       iter->next = next;
+}
+
+void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+       struct cds_lfht_node *node, *next;
+
+       node = clear_flag(iter->next);
+       for (;;) {
+               if (unlikely(is_end(node))) {
+                       node = next = NULL;
+                       break;
+               }
+               next = rcu_dereference(node->p.next);
+               if (likely(!is_removed(next))
+                   && !is_dummy(next)) {
+                               break;
+               }
+               node = clear_flag(next);
+       }
+       assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+       iter->node = node;
+       iter->next = next;
+}
+
+void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+       struct _cds_lfht_node *lookup;
+
+       /*
+        * Get next after first dummy node. The first dummy node is the
+        * first node of the linked list.
+        */
+       lookup = &ht->t.tbl[0]->nodes[0];
+       iter->next = lookup->next;
+       cds_lfht_next(ht, iter);
+}
+
+void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
+{
+       unsigned long hash, size;
+
+       hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+       node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+
+       size = rcu_dereference(ht->t.size);
+       _cds_lfht_add(ht, size, node, NULL, 0);
+       ht_count_add(ht, size, hash);
+}
+
+struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
+                               struct cds_lfht_node *node)
+{
+       unsigned long hash, size;
+       struct cds_lfht_iter iter;
+
+       hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+       node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+
+       size = rcu_dereference(ht->t.size);
+       _cds_lfht_add(ht, size, node, &iter, 0);
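+       /*
+        * _cds_lfht_add returns in iter.node the node now present for
+        * this key: the new node if the add succeeded, or the
+        * pre-existing unique node if the key was already in the table.
+        */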
+       if (iter.node == node)
+               ht_count_add(ht, size, hash);
+       return iter.node;
+}
+
+struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
+                               struct cds_lfht_node *node)
+{
+       unsigned long hash, size;
+       struct cds_lfht_iter iter;
+
+       hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+       node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+
+       size = rcu_dereference(ht->t.size);
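+       /*
+        * Retry loop: try to add the node; if an older node with the
+        * same key is returned instead, try to replace it. Restart if
+        * that older node is concurrently removed under us.
+        */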
+       for (;;) {
+               _cds_lfht_add(ht, size, node, &iter, 0);
+               if (iter.node == node) {
+                       ht_count_add(ht, size, hash);
+                       return NULL;
+               }
+
+               if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node))
+                       return iter.node;
+       }
+}
+
+int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
+               struct cds_lfht_node *new_node)
+{
+       unsigned long size;
+
+       size = rcu_dereference(ht->t.size);
+       return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
+                       new_node);
+}
+
+int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+       unsigned long size, hash;
+       int ret;
+
+       size = rcu_dereference(ht->t.size);
+       ret = _cds_lfht_del(ht, size, iter->node, 0);
+       if (!ret) {
+               hash = bit_reverse_ulong(iter->node->p.reverse_hash);
+               ht_count_del(ht, size, hash);
+       }
+       return ret;
+}
+
+static
+int cds_lfht_delete_dummy(struct cds_lfht *ht)
+{
+       struct cds_lfht_node *node;
+       struct _cds_lfht_node *lookup;
+       unsigned long order, i, size;
+
+       /* Check that the table is empty */
+       lookup = &ht->t.tbl[0]->nodes[0];
+       node = (struct cds_lfht_node *) lookup;
+       do {
+               node = clear_flag(node)->p.next;
+               if (!is_dummy(node))
+                       return -EPERM;
+               assert(!is_removed(node));
+       } while (!is_end(node));
+       /*
+        * size accessed without rcu_dereference because hash table is
+        * being destroyed.
+        */
+       size = ht->t.size;
+       /* Internal sanity check: all nodes left should be dummy */
+       for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
+               unsigned long len;
+
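+               /*
+                * Bucket count per order: order 0 holds 1 bucket,
+                * order k >= 1 holds 2^(k-1); orders 0..N together
+                * cover a table of size 2^N.
+                */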
+               len = !order ? 1 : 1UL << (order - 1);
+               for (i = 0; i < len; i++) {
+                       dbg_printf("delete order %lu i %lu hash %lu\n",
+                               order, i,
+                               bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
+                       assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
+               }
+
+               if (order == ht->min_alloc_order)
+                       poison_free(ht->t.tbl[0]);
+               else if (order > ht->min_alloc_order)
+                       poison_free(ht->t.tbl[order]);
+               /* Nothing to delete for order < ht->min_alloc_order */
+       }
+       return 0;
+}
+
+/*
+ * Should only be called once no concurrent reader or writer can
+ * possibly still access the table.
+ */
+int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
+{
+       int ret;
+
+       /* Wait for in-flight resize operations to complete */
+       _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+       cmm_smp_mb();   /* Store destroy before load resize */
+       while (uatomic_read(&ht->in_progress_resize))
+               poll(NULL, 0, 100);     /* wait for 100ms */
+       ret = cds_lfht_delete_dummy(ht);
+       if (ret)
+               return ret;
+       free_split_items_count(ht);
+       if (attr)
+               *attr = ht->resize_attr;
+       poison_free(ht);
+       return ret;
+}
+
+void cds_lfht_count_nodes(struct cds_lfht *ht,
+               long *approx_before,
+               unsigned long *count,
+               unsigned long *removed,
+               long *approx_after)
+{
+       struct cds_lfht_node *node, *next;
+       struct _cds_lfht_node *lookup;
+       unsigned long nr_dummy = 0;
+
+       *approx_before = 0;
+       if (ht->split_count) {
+               int i;
+
+               for (i = 0; i < split_count_mask + 1; i++) {
+                       *approx_before += uatomic_read(&ht->split_count[i].add);
+                       *approx_before -= uatomic_read(&ht->split_count[i].del);
+               }
+       }
+
+       *count = 0;
+       *removed = 0;
+
+       /* Count non-dummy nodes in the table */
+       lookup = &ht->t.tbl[0]->nodes[0];
+       node = (struct cds_lfht_node *) lookup;
+       do {
+               next = rcu_dereference(node->p.next);
+               if (is_removed(next)) {
+                       if (!is_dummy(next))
+                               (*removed)++;
+                       else
+                               (nr_dummy)++;
+               } else if (!is_dummy(next))
+                       (*count)++;
+               else
+                       (nr_dummy)++;
+               node = clear_flag(next);
+       } while (!is_end(node));
+       dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
+       *approx_after = 0;
+       if (ht->split_count) {
+               int i;
+
+               for (i = 0; i < split_count_mask + 1; i++) {
+                       *approx_after += uatomic_read(&ht->split_count[i].add);
+                       *approx_after -= uatomic_read(&ht->split_count[i].del);
+               }
+       }
+}
+
+/* called with resize mutex held */
+static
+void _do_cds_lfht_grow(struct cds_lfht *ht,
+               unsigned long old_size, unsigned long new_size)
+{
+       unsigned long old_order, new_order;
+
+       old_order = get_count_order_ulong(old_size);
+       new_order = get_count_order_ulong(new_size);
+       dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+                  old_size, old_order, new_size, new_order);
+       assert(new_size > old_size);
+       init_table(ht, old_order + 1, new_order);
+}
+
+/* called with resize mutex held */
+static
+void _do_cds_lfht_shrink(struct cds_lfht *ht,
+               unsigned long old_size, unsigned long new_size)
+{
+       unsigned long old_order, new_order;
+
+       new_size = max(new_size, ht->min_alloc_size);
+       old_order = get_count_order_ulong(old_size);
+       new_order = get_count_order_ulong(new_size);
+       dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+                  old_size, old_order, new_size, new_order);
+       assert(new_size < old_size);
+
+       /* Remove and unlink all dummy nodes to remove. */
+       fini_table(ht, new_order + 1, old_order);
+}
+
+/* called with resize mutex held */
+static
+void _do_cds_lfht_resize(struct cds_lfht *ht)
+{
+       unsigned long new_size, old_size;
+
+       /*
+        * Resize table, re-do if the target size has changed under us.
+        */
+       do {
+               assert(uatomic_read(&ht->in_progress_resize));
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
+               ht->t.resize_initiated = 1;
+               old_size = ht->t.size;
+               new_size = CMM_LOAD_SHARED(ht->t.resize_target);
+               if (old_size < new_size)
+                       _do_cds_lfht_grow(ht, old_size, new_size);
+               else if (old_size > new_size)
+                       _do_cds_lfht_shrink(ht, old_size, new_size);
+               ht->t.resize_initiated = 0;
+               /* write resize_initiated before read resize_target */
+               cmm_smp_mb();
+       } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
+}
+
+static
+unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
+                                  int growth_order)
+{
+       return _uatomic_max(&ht->t.resize_target,
+                           size << growth_order);
+}
+
+static
+void resize_target_update_count(struct cds_lfht *ht,
+                               unsigned long count)
+{
+       count = max(count, ht->min_alloc_size);
+       uatomic_set(&ht->t.resize_target, count);
+}
+
+void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
+{
+       resize_target_update_count(ht, new_size);
+       CMM_STORE_SHARED(ht->t.resize_initiated, 1);
+       ht->cds_lfht_rcu_thread_offline();
+       pthread_mutex_lock(&ht->resize_mutex);
+       _do_cds_lfht_resize(ht);
+       pthread_mutex_unlock(&ht->resize_mutex);
+       ht->cds_lfht_rcu_thread_online();
+}
+
+static
+void do_resize_cb(struct rcu_head *head)
+{
+       struct rcu_resize_work *work =
+               caa_container_of(head, struct rcu_resize_work, head);
+       struct cds_lfht *ht = work->ht;
+
+       ht->cds_lfht_rcu_thread_offline();
+       pthread_mutex_lock(&ht->resize_mutex);
+       _do_cds_lfht_resize(ht);
+       pthread_mutex_unlock(&ht->resize_mutex);
+       ht->cds_lfht_rcu_thread_online();
+       poison_free(work);
+       cmm_smp_mb();   /* finish resize before decrement */
+       uatomic_dec(&ht->in_progress_resize);
+}
+
+static
+void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
+{
+       struct rcu_resize_work *work;
+       unsigned long target_size;
+
+       target_size = resize_target_update(ht, size, growth);
+       /* Store resize_target before read resize_initiated */
+       cmm_smp_mb();
+       if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
+               uatomic_inc(&ht->in_progress_resize);
+               cmm_smp_mb();   /* increment resize count before load destroy */
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+                       uatomic_dec(&ht->in_progress_resize);
+                       return;
+               }
+               work = malloc(sizeof(*work));
+               work->ht = ht;
+               ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
+               CMM_STORE_SHARED(ht->t.resize_initiated, 1);
+       }
+}
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
+                               unsigned long count)
+{
+       struct rcu_resize_work *work;
+
+       if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+               return;
+       resize_target_update_count(ht, count);
+       /* Store resize_target before read resize_initiated */
+       cmm_smp_mb();
+       if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
+               uatomic_inc(&ht->in_progress_resize);
+               cmm_smp_mb();   /* increment resize count before load destroy */
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+                       uatomic_dec(&ht->in_progress_resize);
+                       return;
+               }
+               work = malloc(sizeof(*work));
+               work->ht = ht;
+               ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
+               CMM_STORE_SHARED(ht->t.resize_initiated, 1);
+       }
+}
index 44fac55aa938d712d3a91b9c5143d73c2da14d2f..05a8024633c033323405c6c0ac3641c7712f8ef7 100644 (file)
@@ -15,7 +15,7 @@ noinst_PROGRAMS = test_urcu test_urcu_dynamic_link test_urcu_timing \
         test_urcu_bp test_urcu_bp_dynamic_link test_cycles_per_loop \
        test_urcu_lfq test_urcu_wfq test_urcu_lfs test_urcu_wfs \
        test_urcu_wfq_dynlink test_urcu_wfs_dynlink \
-       test_urcu_lfq_dynlink test_urcu_lfs_dynlink
+       test_urcu_lfq_dynlink test_urcu_lfs_dynlink test_urcu_hash
 noinst_HEADERS = rcutorture.h
 
 if COMPAT_ARCH
@@ -175,6 +175,10 @@ test_urcu_wfs_dynlink_SOURCES = test_urcu_wfs.c
 test_urcu_wfs_dynlink_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
 test_urcu_wfs_dynlink_LDADD = $(URCU_COMMON_LIB)
 
+test_urcu_hash_SOURCES = test_urcu_hash.c $(COMPAT)
+test_urcu_hash_CFLAGS = -DRCU_QSBR $(AM_CFLAGS)
+test_urcu_hash_LDADD = $(URCU_QSBR_LIB) $(URCU_CDS_LIB)
+
 urcutorture.c: api.h
 
 check-am:
diff --git a/tests/test_urcu_hash.c b/tests/test_urcu_hash.c
new file mode 100644 (file)
index 0000000..8f850ac
--- /dev/null
@@ -0,0 +1,1019 @@
+/*
+ * test_urcu_hash.c
+ *
+ * Userspace RCU library - test program
+ *
+ * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sched.h>
+#include <errno.h>
+
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
+#define DEFAULT_HASH_SIZE      32
+#define DEFAULT_MIN_ALLOC_SIZE 1
+#define DEFAULT_RAND_POOL      1000000
+
+/* Make this big enough to include the POWER5+ L3 cacheline size of 256B */
+#define CACHE_LINE_SIZE 4096
+
+/* hardcoded number of CPUs */
+#define NR_CPUS 16384
+
+#ifdef POISON_FREE
+#define poison_free(ptr)                               \
+       do {                                            \
+               memset(ptr, 0x42, sizeof(*(ptr)));      \
+               free(ptr);                              \
+       } while (0)
+#else
+#define poison_free(ptr)       free(ptr)
+#endif
+
+
+#if defined(_syscall0)
+_syscall0(pid_t, gettid)
+#elif defined(__NR_gettid)
+static inline pid_t gettid(void)
+{
+       return syscall(__NR_gettid);
+}
+#else
+#warning "use pid as tid"
+static inline pid_t gettid(void)
+{
+       return getpid();
+}
+#endif
+
+#ifndef DYNAMIC_LINK_TEST
+#define _LGPL_SOURCE
+#else
+#define debug_yield_read()
+#endif
+#include <urcu-qsbr.h>
+#include <urcu/rculfhash.h>
+#include <urcu-call-rcu.h>
+
+struct wr_count {
+       unsigned long update_ops;
+       unsigned long add;
+       unsigned long add_exist;
+       unsigned long remove;
+};
+
+static unsigned int __thread rand_lookup;
+static unsigned long __thread nr_add;
+static unsigned long __thread nr_addexist;
+static unsigned long __thread nr_del;
+static unsigned long __thread nr_delnoent;
+static unsigned long __thread lookup_fail;
+static unsigned long __thread lookup_ok;
+
+static struct cds_lfht *test_ht;
+
+struct test_data {
+       int a;
+       int b;
+};
+
+static volatile int test_go, test_stop;
+
+static unsigned long wdelay;
+
+static unsigned long duration;
+
+/* read-side C.S. duration, in loops */
+static unsigned long rduration;
+
+static unsigned long init_hash_size = DEFAULT_HASH_SIZE;
+static unsigned long min_hash_alloc_size = DEFAULT_MIN_ALLOC_SIZE;
+static unsigned long init_populate;
+static int opt_auto_resize;
+static int add_only, add_unique, add_replace;
+
+static unsigned long init_pool_offset, lookup_pool_offset, write_pool_offset;
+static unsigned long init_pool_size = DEFAULT_RAND_POOL,
+       lookup_pool_size = DEFAULT_RAND_POOL,
+       write_pool_size = DEFAULT_RAND_POOL;
+static int validate_lookup;
+
+static int count_pipe[2];
+
+static inline void loop_sleep(unsigned long l)
+{
+       while (l-- != 0)
+               caa_cpu_relax();
+}
+
+static int verbose_mode;
+
+#define printf_verbose(fmt, args...)           \
+       do {                                    \
+               if (verbose_mode)               \
+                       printf(fmt, ## args);   \
+       } while (0)
+
+static unsigned int cpu_affinities[NR_CPUS];
+static unsigned int next_aff = 0;
+static int use_affinity = 0;
+
+pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void set_affinity(void)
+{
+       cpu_set_t mask;
+       int cpu;
+       int ret;
+
+       if (!use_affinity)
+               return;
+
+       ret = pthread_mutex_lock(&affinity_mutex);
+       if (ret) {
+               perror("Error in pthread mutex lock");
+               exit(-1);
+       }
+       cpu = cpu_affinities[next_aff++];
+       ret = pthread_mutex_unlock(&affinity_mutex);
+       if (ret) {
+               perror("Error in pthread mutex unlock");
+               exit(-1);
+       }
+       CPU_ZERO(&mask);
+       CPU_SET(cpu, &mask);
+       sched_setaffinity(0, sizeof(mask), &mask);
+}
+
+static enum {
+       AR_RANDOM = 0,
+       AR_ADD = 1,
+       AR_REMOVE = -1,
+} addremove;   /* 1: add, -1: remove, 0: random */
+
+static
+void sigusr1_handler(int signo)
+{
+       switch (addremove) {
+       case AR_ADD:
+               printf("Add/Remove: random.\n");
+               addremove = AR_RANDOM;
+               break;
+       case AR_RANDOM:
+               printf("Add/Remove: remove only.\n");
+               addremove = AR_REMOVE;
+               break;
+       case AR_REMOVE:
+               printf("Add/Remove: add only.\n");
+               addremove = AR_ADD;
+               break;
+       }
+}
+
+static
+void sigusr2_handler(int signo)
+{
+       char msg[1] = { 0x42 };
+       ssize_t ret;
+
+       do {
+               ret = write(count_pipe[1], msg, 1);     /* wakeup thread */
+       } while (ret == -1L && errno == EINTR);
+}
+
+/*
+ * returns 0 if test should end.
+ */
+static int test_duration_write(void)
+{
+       return !test_stop;
+}
+
+static int test_duration_read(void)
+{
+       return !test_stop;
+}
+
+static unsigned long long __thread nr_writes;
+static unsigned long long __thread nr_reads;
+
+static unsigned int nr_readers;
+static unsigned int nr_writers;
+
+pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+void rcu_copy_mutex_lock(void)
+{
+       int ret;
+       ret = pthread_mutex_lock(&rcu_copy_mutex);
+       if (ret) {
+               perror("Error in pthread mutex lock");
+               exit(-1);
+       }
+}
+
+void rcu_copy_mutex_unlock(void)
+{
+       int ret;
+
+       ret = pthread_mutex_unlock(&rcu_copy_mutex);
+       if (ret) {
+               perror("Error in pthread mutex unlock");
+               exit(-1);
+       }
+}
+
+/*
+ * Hash function
+ * Source: http://burtleburtle.net/bob/c/lookup3.c
+ * Originally Public Domain
+ */
+
+#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
+
+#define mix(a, b, c) \
+do { \
+       a -= c; a ^= rot(c,  4); c += b; \
+       b -= a; b ^= rot(a,  6); a += c; \
+       c -= b; c ^= rot(b,  8); b += a; \
+       a -= c; a ^= rot(c, 16); c += b; \
+       b -= a; b ^= rot(a, 19); a += c; \
+       c -= b; c ^= rot(b,  4); b += a; \
+} while (0)
+
+#define final(a, b, c) \
+{ \
+       c ^= b; c -= rot(b, 14); \
+       a ^= c; a -= rot(c, 11); \
+       b ^= a; b -= rot(a, 25); \
+       c ^= b; c -= rot(b, 16); \
+       a ^= c; a -= rot(c,  4);\
+       b ^= a; b -= rot(a, 14); \
+       c ^= b; c -= rot(b, 24); \
+}
+
+static __attribute__((unused))
+uint32_t hash_u32(
+       const uint32_t *k,      /* the key, an array of uint32_t values */
+       size_t length,          /* the length of the key, in uint32_ts */
+       uint32_t initval)       /* the previous hash, or an arbitrary value */
+{
+       uint32_t a, b, c;
+
+       /* Set up the internal state */
+       a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;
+
+       /*----------------------------------------- handle most of the key */
+       while (length > 3) {
+               a += k[0];
+               b += k[1];
+               c += k[2];
+               mix(a, b, c);
+               length -= 3;
+               k += 3;
+       }
+
+       /*----------------------------------- handle the last 3 uint32_t's */
+       switch (length) {       /* all the case statements fall through */
+       case 3: c += k[2];
+       case 2: b += k[1];
+       case 1: a += k[0];
+               final(a, b, c);
+       case 0:                 /* case 0: nothing left to add */
+               break;
+       }
+       /*---------------------------------------------- report the result */
+       return c;
+}
+
+static
+void hashword2(
+       const uint32_t *k,      /* the key, an array of uint32_t values */
+       size_t length,          /* the length of the key, in uint32_ts */
+       uint32_t *pc,           /* IN: seed OUT: primary hash value */
+       uint32_t *pb)           /* IN: more seed OUT: secondary hash value */
+{
+       uint32_t a, b, c;
+
+       /* Set up the internal state */
+       a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
+       c += *pb;
+
+       /*----------------------------------------- handle most of the key */
+       while (length > 3) {
+               a += k[0];
+               b += k[1];
+               c += k[2];
+               mix(a, b, c);
+               length -= 3;
+               k += 3;
+       }
+
+       /*----------------------------------- handle the last 3 uint32_t's */
+       switch (length) {       /* all the case statements fall through */
+       case 3: c += k[2];
+       case 2: b += k[1];
+       case 1: a += k[0];
+               final(a, b, c);
+       case 0:                 /* case 0: nothing left to add */
+               break;
+       }
+       /*---------------------------------------------- report the result */
+       *pc = c;
+       *pb = b;
+}
+
+#if (CAA_BITS_PER_LONG == 32)
+static
+unsigned long test_hash(void *_key, size_t length, unsigned long seed)
+{
+       unsigned int key = (unsigned int) _key;
+
+       assert(length == sizeof(unsigned int));
+       return hash_u32(&key, 1, seed);
+}
+#else
+static
+unsigned long test_hash(void *_key, size_t length, unsigned long seed)
+{
+       union {
+               uint64_t v64;
+               uint32_t v32[2];
+       } v;
+       union {
+               uint64_t v64;
+               uint32_t v32[2];
+       } key;
+
+       assert(length == sizeof(unsigned long));
+       v.v64 = (uint64_t) seed;
+       key.v64 = (uint64_t) _key;
+       hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
+       return v.v64;
+}
+#endif
+
+static
+unsigned long test_compare(void *key1, size_t key1_len,
+                           void *key2, size_t key2_len)
+{
+       if (unlikely(key1_len != key2_len))
+               return -1;
+       assert(key1_len == sizeof(unsigned long));
+       if (key1 == key2)
+               return 0;
+       else
+               return 1;
+}
+
+void *thr_count(void *arg)
+{
+       printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
+                       "counter", pthread_self(), (unsigned long)gettid());
+
+       rcu_register_thread();
+
+       for (;;) {
+               unsigned long count, removed;
+               long approx_before, approx_after;
+               ssize_t len;
+               char buf[1];
+
+               rcu_thread_offline();
+               len = read(count_pipe[0], buf, 1);
+               rcu_thread_online();
+               if (unlikely(!test_duration_read()))
+                       break;
+               if (len != 1)
+                       continue;
+               /* Accounting */
+               printf("Counting nodes... ");
+               fflush(stdout);
+               rcu_read_lock();
+               cds_lfht_count_nodes(test_ht, &approx_before, &count, &removed,
+                               &approx_after);
+               rcu_read_unlock();
+               printf("done.\n");
+               printf("Approximation before node accounting: %ld nodes.\n",
+                       approx_before);
+               printf("Accounting of nodes in the hash table: "
+                       "%lu nodes + %lu logically removed.\n",
+                       count, removed);
+               printf("Approximation after node accounting: %ld nodes.\n",
+                       approx_after);
+       }
+       rcu_unregister_thread();
+       return NULL;
+}
+
+void *thr_reader(void *_count)
+{
+       unsigned long long *count = _count;
+       struct cds_lfht_node *node;
+       struct cds_lfht_iter iter;
+
+       printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
+                       "reader", pthread_self(), (unsigned long)gettid());
+
+       set_affinity();
+
+       rcu_register_thread();
+
+       while (!test_go)
+       {
+       }
+       cmm_smp_mb();
+
+       for (;;) {
+               rcu_read_lock();
+               cds_lfht_lookup(test_ht,
+                       (void *)(((unsigned long) rand_r(&rand_lookup) % lookup_pool_size) + lookup_pool_offset),
+                       sizeof(void *), &iter);
+               node = cds_lfht_iter_get_node(&iter);
+               if (node == NULL) {
+                       if (validate_lookup) {
+                               printf("[ERROR] Lookup cannot find initial node.\n");
+                               exit(-1);
+                       }
+                       lookup_fail++;
+               } else {
+                       lookup_ok++;
+               }
+               debug_yield_read();
+               if (unlikely(rduration))
+                       loop_sleep(rduration);
+               rcu_read_unlock();
+               nr_reads++;
+               if (unlikely(!test_duration_read()))
+                       break;
+               if (unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+                       rcu_quiescent_state();
+       }
+
+       rcu_unregister_thread();
+
+       *count = nr_reads;
+       printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
+                       "reader", pthread_self(), (unsigned long)gettid());
+       printf_verbose("readid : %lx, lookupfail %lu, lookupok %lu\n",
+                       pthread_self(), lookup_fail, lookup_ok);
+       return ((void*)1);
+}
+
+static
+void free_node_cb(struct rcu_head *head)
+{
+       struct cds_lfht_node *node =
+               caa_container_of(head, struct cds_lfht_node, head);
+       free(node);
+}
+
+void *thr_writer(void *_count)
+{
+       struct cds_lfht_node *node, *ret_node;
+       struct cds_lfht_iter iter;
+       struct wr_count *count = _count;
+       int ret;
+
+       printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
+                       "writer", pthread_self(), (unsigned long)gettid());
+
+       set_affinity();
+
+       rcu_register_thread();
+
+       while (!test_go)
+       {
+       }
+       cmm_smp_mb();
+
+       for (;;) {
+               if ((addremove == AR_ADD || add_only)
+                               || (addremove == AR_RANDOM && rand_r(&rand_lookup) & 1)) {
+                       node = malloc(sizeof(struct cds_lfht_node));
+                       cds_lfht_node_init(node,
+                               (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
+                               sizeof(void *));
+                       rcu_read_lock();
+                       if (add_unique) {
+                               ret_node = cds_lfht_add_unique(test_ht, node);
+                       } else {
+                               if (add_replace)
+                                       ret_node = cds_lfht_add_replace(test_ht, node);
+                               else
+                                       cds_lfht_add(test_ht, node);
+                       }
+                       rcu_read_unlock();
+                       if (add_unique && ret_node != node) {
+                               free(node);
+                               nr_addexist++;
+                       } else {
+                               if (add_replace && ret_node) {
+                                       call_rcu(&ret_node->head, free_node_cb);
+                                       nr_addexist++;
+                               } else {
+                                       nr_add++;
+                               }
+                       }
+               } else {
+                       /* May delete */
+                       rcu_read_lock();
+                       cds_lfht_lookup(test_ht,
+                               (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
+                               sizeof(void *), &iter);
+                       ret = cds_lfht_del(test_ht, &iter);
+                       rcu_read_unlock();
+                       if (ret == 0) {
+                               node = cds_lfht_iter_get_node(&iter);
+                               call_rcu(&node->head, free_node_cb);
+                               nr_del++;
+                       } else
+                               nr_delnoent++;
+               }
+#if 0
+               //if (nr_writes % 100000 == 0) {
+               if (nr_writes % 1000 == 0) {
+                       rcu_read_lock();
+                       if (rand_r(&rand_lookup) & 1) {
+                               ht_resize(test_ht, 1);
+                       } else {
+                               ht_resize(test_ht, -1);
+                       }
+                       rcu_read_unlock();
+               }
+#endif //0
+               nr_writes++;
+               if (unlikely(!test_duration_write()))
+                       break;
+               if (unlikely(wdelay))
+                       loop_sleep(wdelay);
+               if (unlikely((nr_writes & ((1 << 10) - 1)) == 0))
+                       rcu_quiescent_state();
+       }
+
+       rcu_unregister_thread();
+
+       printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
+                       "writer", pthread_self(), (unsigned long)gettid());
+       printf_verbose("info id %lx: nr_add %lu, nr_addexist %lu, nr_del %lu, "
+                       "nr_delnoent %lu\n", pthread_self(), nr_add,
+                       nr_addexist, nr_del, nr_delnoent);
+       count->update_ops = nr_writes;
+       count->add = nr_add;
+       count->add_exist = nr_addexist;
+       count->remove = nr_del;
+       return ((void*)2);
+}
+
+static int populate_hash(void)
+{
+       struct cds_lfht_node *node, *ret_node;
+
+       if (!init_populate)
+               return 0;
+
+       if ((add_unique || add_replace) && init_populate * 10 > init_pool_size) {
+               printf("WARNING: required to populate %lu nodes (-k), but random "
+                       "pool is quite small (%lu values) and we are in add_unique "
+                       "(-u) or add_replace (-s) mode. Try with a larger random "
+                       "pool (-O option). This may take a while...\n",
+                       init_populate, init_pool_size);
+       }
+
+       while (nr_add < init_populate) {
+               node = malloc(sizeof(struct cds_lfht_node));
+               cds_lfht_node_init(node,
+                       (void *)(((unsigned long) rand_r(&rand_lookup) % init_pool_size) + init_pool_offset),
+                       sizeof(void *));
+               rcu_read_lock();
+               if (add_unique) {
+                       ret_node = cds_lfht_add_unique(test_ht, node);
+               } else {
+                       if (add_replace)
+                               ret_node = cds_lfht_add_replace(test_ht, node);
+                       else
+                               cds_lfht_add(test_ht, node);
+               }
+               rcu_read_unlock();
+               if (add_unique && ret_node != node) {
+                       free(node);
+                       nr_addexist++;
+               } else {
+                       if (add_replace && ret_node) {
+                               call_rcu(&ret_node->head, free_node_cb);
+                               nr_addexist++;
+                       } else {
+                               nr_add++;
+                       }
+               }
+               nr_writes++;
+       }
+       return 0;
+}
+
+static
+void test_delete_all_nodes(struct cds_lfht *ht)
+{
+       struct cds_lfht_iter iter;
+       struct cds_lfht_node *node;
+       unsigned long count = 0;
+
+       cds_lfht_first(ht, &iter);
+       while ((node = cds_lfht_iter_get_node(&iter)) != NULL) {
+               int ret;
+
+               ret = cds_lfht_del(ht, &iter);
+               assert(!ret);
+               call_rcu(&node->head, free_node_cb);
+               cds_lfht_next(ht, &iter);
+               count++;
+       }
+       printf("deleted %lu nodes.\n", count);
+}
+
+void show_usage(int argc, char **argv)
+{
+       printf("Usage : %s nr_readers nr_writers duration (s)\n", argv[0]);
+#ifdef DEBUG_YIELD
+       printf("        [-r] [-w] (yield reader and/or writer)\n");
+#endif
+       printf("        [-d delay] (writer period (us))\n");
+       printf("        [-c duration] (reader C.S. duration (in loops))\n");
+       printf("        [-v] (verbose output)\n");
+       printf("        [-a cpu#] [-a cpu#]... (affinity)\n");
+       printf("        [-h size] (initial hash table size)\n");
+       printf("        [-m size] (minimum hash alloc size)\n");
+       printf("        [not -u nor -s] Add entries (supports redundant keys).\n");
+       printf("        [-u] Uniquify add (no redundant keys).\n");
+       printf("        [-s] Replace (swap) entries.\n");
+       printf("        [-i] Add only (no removal).\n");
+       printf("        [-k nr_nodes] Number of nodes to insert initially.\n");
+       printf("        [-A] Automatically resize hash table.\n");
+       printf("        [-R offset] Lookup pool offset.\n");
+       printf("        [-S offset] Write pool offset.\n");
+       printf("        [-T offset] Init pool offset.\n");
+       printf("        [-M size] Lookup pool size.\n");
+       printf("        [-N size] Write pool size.\n");
+       printf("        [-O size] Init pool size.\n");
+       printf("        [-V] Validate lookups of init values (use with filled init pool, same lookup range, with different write range).\n");
+       printf("\n\n");
+}
+
+int main(int argc, char **argv)
+{
+       int err;
+       pthread_t *tid_reader, *tid_writer;
+       pthread_t tid_count;
+       void *tret;
+       unsigned long long *count_reader;
+       struct wr_count *count_writer;
+       unsigned long long tot_reads = 0, tot_writes = 0,
+               tot_add = 0, tot_add_exist = 0, tot_remove = 0;
+       unsigned long count, removed;
+       long approx_before, approx_after;
+       int i, a, ret;
+       struct sigaction act;
+       unsigned int remain;
+
+       if (argc < 4) {
+               show_usage(argc, argv);
+               return -1;
+       }
+
+       err = sscanf(argv[1], "%u", &nr_readers);
+       if (err != 1) {
+               show_usage(argc, argv);
+               return -1;
+       }
+
+       err = sscanf(argv[2], "%u", &nr_writers);
+       if (err != 1) {
+               show_usage(argc, argv);
+               return -1;
+       }
+
+       err = sscanf(argv[3], "%lu", &duration);
+       if (err != 1) {
+               show_usage(argc, argv);
+               return -1;
+       }
+
+       for (i = 4; i < argc; i++) {
+               if (argv[i][0] != '-')
+                       continue;
+               switch (argv[i][1]) {
+#ifdef DEBUG_YIELD
+               case 'r':
+                       yield_active |= YIELD_READ;
+                       break;
+               case 'w':
+                       yield_active |= YIELD_WRITE;
+                       break;
+#endif
+               case 'a':
+                       if (argc < i + 2) {
+                               show_usage(argc, argv);
+                               return -1;
+                       }
+                       a = atoi(argv[++i]);
+                       cpu_affinities[next_aff++] = a;
+                       use_affinity = 1;
+                       printf_verbose("Adding CPU %d affinity\n", a);
+                       break;
+               case 'c':
+                       if (argc < i + 2) {
+                               show_usage(argc, argv);
+                               return -1;
+                       }
+                       rduration = atol(argv[++i]);
+                       break;
+               case 'd':
+                       if (argc < i + 2) {
+                               show_usage(argc, argv);
+                               return -1;
+                       }
+                       wdelay = atol(argv[++i]);
+                       break;
+               case 'v':
+                       verbose_mode = 1;
+                       break;
+               case 'h':
+                       if (argc < i + 2) {
+                               show_usage(argc, argv);
+                               return -1;
+                       }
+                       init_hash_size = atol(argv[++i]);
+                       break;
+               case 'm':
+                       if (argc < i + 2) {
+                               show_usage(argc, argv);
+                               return -1;
+                       }
+                       min_hash_alloc_size = atol(argv[++i]);
+                       break;
+               case 'u':
+                       if (add_replace) {
+                               printf("Please specify at most one of -s or -u.\n");
+                               exit(-1);
+                       }
+                       add_unique = 1;
+                       break;
+               case 's':
+                       if (add_unique) {
+                               printf("Please specify at most one of -s or -u.\n");
+                               exit(-1);
+                       }
+                       add_replace = 1;
+                       break;
+               case 'i':
+                       add_only = 1;
+                       break;
+               case 'k':
+                       init_populate = atol(argv[++i]);
+                       break;
+               case 'A':
+                       opt_auto_resize = 1;
+                       break;
+               case 'R':
+                       lookup_pool_offset = atol(argv[++i]);
+                       break;
+               case 'S':
+                       write_pool_offset = atol(argv[++i]);
+                       break;
+               case 'T':
+                       init_pool_offset = atol(argv[++i]);
+                       break;
+               case 'M':
+                       lookup_pool_size = atol(argv[++i]);
+                       break;
+               case 'N':
+                       write_pool_size = atol(argv[++i]);
+                       break;
+               case 'O':
+                       init_pool_size = atol(argv[++i]);
+                       break;
+               case 'V':
+                       validate_lookup = 1;
+                       break;
+
+               }
+       }
+
+       /* Check if hash size is power of 2 */
+       if (init_hash_size && init_hash_size & (init_hash_size - 1)) {
+               printf("Error: Hash table size %lu is not a power of 2.\n",
+                       init_hash_size);
+               return -1;
+       }
+
+       if (min_hash_alloc_size && min_hash_alloc_size & (min_hash_alloc_size - 1)) {
+               printf("Error: Min hash alloc size %lu is not a power of 2.\n",
+                       min_hash_alloc_size);
+               return -1;
+       }
+
+       memset(&act, 0, sizeof(act));
+       ret = sigemptyset(&act.sa_mask);
+       if (ret == -1) {
+               perror("sigemptyset");
+               return -1;
+       }
+       act.sa_handler = sigusr1_handler;
+       act.sa_flags = SA_RESTART;
+       ret = sigaction(SIGUSR1, &act, NULL);
+       if (ret == -1) {
+               perror("sigaction");
+               return -1;
+       }
+
+       ret = pipe(count_pipe);
+       if (ret == -1) {
+               perror("pipe");
+               return -1;
+       }
+
+       /* spawn counter thread */
+       err = pthread_create(&tid_count, NULL, thr_count,
+                            NULL);
+       if (err != 0)
+               exit(1);
+
+       act.sa_handler = sigusr2_handler;
+       act.sa_flags = SA_RESTART;
+       ret = sigaction(SIGUSR2, &act, NULL);
+       if (ret == -1) {
+               perror("sigaction");
+               return -1;
+       }
+
+       printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
+               duration, nr_readers, nr_writers);
+       printf_verbose("Writer delay : %lu loops.\n", wdelay);
+       printf_verbose("Reader duration : %lu loops.\n", rduration);
+       printf_verbose("Mode:%s%s.\n",
+               add_only ? " add only" : " add/remove",
+               add_unique ? " uniquify" : ( add_replace ? " replace" : " insert"));
+       printf_verbose("Initial hash table size: %lu buckets.\n", init_hash_size);
+       printf_verbose("Minimum hash alloc size: %lu buckets.\n", min_hash_alloc_size);
+       printf_verbose("Init pool size offset %lu size %lu.\n",
+               init_pool_offset, init_pool_size);
+       printf_verbose("Lookup pool size offset %lu size %lu.\n",
+               lookup_pool_offset, lookup_pool_size);
+       printf_verbose("Update pool size offset %lu size %lu.\n",
+               write_pool_offset, write_pool_size);
+       printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
+                       "main", pthread_self(), (unsigned long)gettid());
+
+       tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
+       tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
+       count_reader = malloc(sizeof(*count_reader) * nr_readers);
+       count_writer = malloc(sizeof(*count_writer) * nr_writers);
+
+       err = create_all_cpu_call_rcu_data(0);
+        assert(!err);
+
+       /*
+        * The thread creating and populating the hash table must be seen
+        * as an RCU reader from the point of view of resize.
+        */
+       rcu_register_thread();
+       test_ht = cds_lfht_new(test_hash, test_compare, 0x42UL,
+                       init_hash_size, min_hash_alloc_size,
+                       (opt_auto_resize ? CDS_LFHT_AUTO_RESIZE : 0) |
+                       CDS_LFHT_ACCOUNTING, NULL);
+       ret = populate_hash();
+       assert(!ret);
+
+       rcu_thread_offline();
+
+       next_aff = 0;
+
+       for (i = 0; i < nr_readers; i++) {
+               err = pthread_create(&tid_reader[i], NULL, thr_reader,
+                                    &count_reader[i]);
+               if (err != 0)
+                       exit(1);
+       }
+       for (i = 0; i < nr_writers; i++) {
+               err = pthread_create(&tid_writer[i], NULL, thr_writer,
+                                    &count_writer[i]);
+               if (err != 0)
+                       exit(1);
+       }
+
+       cmm_smp_mb();
+
+       test_go = 1;
+
+       remain = duration;
+       do {
+               remain = sleep(remain);
+       } while (remain > 0);
+
+       test_stop = 1;
+
+       for (i = 0; i < nr_readers; i++) {
+               err = pthread_join(tid_reader[i], &tret);
+               if (err != 0)
+                       exit(1);
+               tot_reads += count_reader[i];
+       }
+       for (i = 0; i < nr_writers; i++) {
+               err = pthread_join(tid_writer[i], &tret);
+               if (err != 0)
+                       exit(1);
+               tot_writes += count_writer[i].update_ops;
+               tot_add += count_writer[i].add;
+               tot_add_exist += count_writer[i].add_exist;
+               tot_remove += count_writer[i].remove;
+       }
+
+       /* teardown counter thread */
+       act.sa_handler = SIG_IGN;
+       act.sa_flags = SA_RESTART;
+       ret = sigaction(SIGUSR2, &act, NULL);
+       if (ret == -1) {
+               perror("sigaction");
+               return -1;
+       }
+       {
+               char msg[1] = { 0x42 };
+               ssize_t ret;
+
+               do {
+                       ret = write(count_pipe[1], msg, 1);     /* wakeup thread */
+               } while (ret == -1L && errno == EINTR);
+       }
+       err = pthread_join(tid_count, &tret);
+       if (err != 0)
+               exit(1);
+
+       fflush(stdout);
+       rcu_thread_online();
+       rcu_read_lock();
+       printf("Counting nodes... ");
+       cds_lfht_count_nodes(test_ht, &approx_before, &count, &removed,
+               &approx_after);
+       printf("done.\n");
+       test_delete_all_nodes(test_ht);
+       rcu_read_unlock();
+       rcu_thread_offline();
+       if (count || removed) {
+               printf("Approximation before node accounting: %ld nodes.\n",
+                       approx_before);
+               printf("Nodes deleted from hash table before destroy: "
+                       "%lu nodes + %lu logically removed.\n",
+                       count, removed);
+               printf("Approximation after node accounting: %ld nodes.\n",
+                       approx_after);
+       }
+       ret = cds_lfht_destroy(test_ht, NULL);
+       if (ret)
+               printf_verbose("final delete aborted\n");
+       else
+               printf_verbose("final delete success\n");
+       printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
+              tot_writes);
+       printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu "
+               "nr_writers %3u "
+               "wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu "
+               "nr_add %12llu nr_add_fail %12llu nr_remove %12llu nr_leaked %12lld\n",
+               argv[0], duration, nr_readers, rduration,
+               nr_writers, wdelay, tot_reads, tot_writes,
+               tot_reads + tot_writes, tot_add, tot_add_exist, tot_remove,
+               (long long) tot_add + init_populate - tot_remove - count);
+       rcu_unregister_thread();
+       free_all_cpu_call_rcu_data();
+       free(tid_reader);
+       free(tid_writer);
+       free(count_reader);
+       free(count_writer);
+       return 0;
+}
index a59a87a72df2178aa02f83d9a2a0b0d18e80a97c..553029578a210383df955fb384d6102ad807d51e 100644 (file)
@@ -349,7 +349,11 @@ void rcu_unregister_thread(void)
 
 void rcu_exit(void)
 {
-       assert(cds_list_empty(&registry));
+       /*
+        * Assertion disabled because call_rcu threads are now rcu
+        * readers, and left running at exit.
+        * assert(cds_list_empty(&registry));
+        */
 }
 
 #include "urcu-call-rcu-impl.h"
index f37a63a43abad56fc2d6811722a535c2a2e9a7f3..ea74cf1c8f0407e21fd791a04d7a8f5ccd170759 100644 (file)
@@ -29,6 +29,7 @@
 #include <urcu/rculist.h>
 #include <urcu/rculfqueue.h>
 #include <urcu/rculfstack.h>
+#include <urcu/rculfhash.h>
 #include <urcu/wfqueue.h>
 #include <urcu/wfstack.h>
 
diff --git a/urcu/jhash.h b/urcu/jhash.h
new file mode 100644 (file)
index 0000000..def03b8
--- /dev/null
@@ -0,0 +1,89 @@
+#ifndef _KCOMPAT_JHASH_H
+#define _KCOMPAT_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose.  It has no warranty.
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
+ * any bugs present are surely my fault.  -DaveM
+ */
+
+#include <stdint.h>
+
+typedef uint8_t u8;
+typedef uint32_t u32;
+
+/* NOTE: Arguments are modified. */
+#define __jhash_mix(a, b, c) \
+{ \
+  a -= b; a -= c; a ^= (c>>13); \
+  b -= c; b -= a; b ^= (a<<8); \
+  c -= a; c -= b; c ^= (b>>13); \
+  a -= b; a -= c; a ^= (c>>12);  \
+  b -= c; b -= a; b ^= (a<<16); \
+  c -= a; c -= b; c ^= (b>>5); \
+  a -= b; a -= c; a ^= (c>>3);  \
+  b -= c; b -= a; b ^= (a<<10); \
+  c -= a; c -= b; c ^= (b>>15); \
+}
+
+/* The golden ratio: an arbitrary value */
+#define JHASH_GOLDEN_RATIO     0x9e3779b9
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes.  No alignment or length assumptions are made about
+ * the input key.
+ */
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+       u32 a, b, c, len;
+       const u8 *k = key;
+
+       len = length;
+       a = b = JHASH_GOLDEN_RATIO;
+       c = initval;
+
+       while (len >= 12) {
+               a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
+               b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
+               c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
+
+               __jhash_mix(a,b,c);
+
+               k += 12;
+               len -= 12;
+       }
+
+       c += length;
+       switch (len) {
+       case 11: c += ((u32)k[10]<<24);
+       case 10: c += ((u32)k[9]<<16);
+       case 9 : c += ((u32)k[8]<<8);
+       case 8 : b += ((u32)k[7]<<24);
+       case 7 : b += ((u32)k[6]<<16);
+       case 6 : b += ((u32)k[5]<<8);
+       case 5 : b += k[4];
+       case 4 : a += ((u32)k[3]<<24);
+       case 3 : a += ((u32)k[2]<<16);
+       case 2 : a += ((u32)k[1]<<8);
+       case 1 : a += k[0];
+       };
+
+       __jhash_mix(a,b,c);
+
+       return c;
+}
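+
+/*
+ * Usage sketch (buf and buf_len are illustrative caller variables,
+ * and the seed value is arbitrary):
+ *
+ *   u32 h = jhash(buf, buf_len, 17);
+ */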
+
+#endif /* _KCOMPAT_JHASH_H */
diff --git a/urcu/rculfhash.h b/urcu/rculfhash.h
new file mode 100644 (file)
index 0000000..008b1d4
--- /dev/null
@@ -0,0 +1,352 @@
+#ifndef _URCU_RCULFHASH_H
+#define _URCU_RCULFHASH_H
+
+/*
+ * urcu/rculfhash.h
+ *
+ * Userspace RCU library - Lock-Free RCU Hash Table
+ *
+ * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Include this file _after_ including your URCU flavor.
+ */
+
+#include <stdint.h>
+#include <urcu-call-rcu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * struct cds_lfht_node and struct _cds_lfht_node should be aligned on
+ * 4-bytes boundaries because the two lower bits are used as flags.
+ */
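+
+/*
+ * A minimal sketch, assuming 4-byte alignment, of how the two
+ * low-order bits of the "next" pointer can carry the REMOVED and
+ * DUMMY flags (the actual helper names and flag values are internal
+ * to rculfhash.c):
+ *
+ *   #define EXAMPLE_REMOVED_FLAG (1UL << 0)
+ *   #define EXAMPLE_DUMMY_FLAG   (1UL << 1)
+ *   #define EXAMPLE_FLAGS_MASK   (EXAMPLE_REMOVED_FLAG | EXAMPLE_DUMMY_FLAG)
+ *
+ *   static inline struct cds_lfht_node *
+ *   example_clear_flag(struct cds_lfht_node *node)
+ *   {
+ *           return (struct cds_lfht_node *)
+ *                   ((unsigned long) node & ~EXAMPLE_FLAGS_MASK);
+ *   }
+ */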
+
+/*
+ * _cds_lfht_node: Contains the internal pointers and reverse-hash
+ * value required for lookup and traversal of the hash table.
+ */
+struct _cds_lfht_node {
+       struct cds_lfht_node *next;     /* ptr | DUMMY_FLAG | REMOVED_FLAG */
+       unsigned long reverse_hash;
+} __attribute__((aligned(4)));
+
+/*
+ * cds_lfht_node: Contains the full key and length required to check for
+ * an actual match, and also contains an rcu_head structure that is used
+ * by RCU to track a node through a given RCU grace period.  There is an
+ * instance of _cds_lfht_node enclosed as a field within each
+ * cds_lfht_node structure.
+ *
+ * struct cds_lfht_node can be embedded into a structure (as a field).
+ * caa_container_of() can be used to get the structure from the struct
+ * cds_lfht_node after a lookup.
+ */
+struct cds_lfht_node {
+       /* cache-hot for iteration */
+       struct _cds_lfht_node p;          /* needs to be first field */
+       void *key;
+       unsigned int key_len;
+       /* cache-cold for iteration */
+       struct rcu_head head;
+};
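+
+/*
+ * Hypothetical embedding example (struct mystruct and its fields are
+ * illustrative, not part of this API; ht, key and key_len are assumed
+ * in scope, with rcu_read_lock held by a registered reader thread).
+ * The node is embedded in the user structure and recovered after a
+ * lookup with caa_container_of():
+ *
+ *   struct mystruct {
+ *           int value;
+ *           struct cds_lfht_node node;
+ *   };
+ *
+ *   struct cds_lfht_iter iter;
+ *   struct cds_lfht_node *ht_node;
+ *   struct mystruct *obj = NULL;
+ *
+ *   cds_lfht_lookup(ht, key, key_len, &iter);
+ *   ht_node = cds_lfht_iter_get_node(&iter);
+ *   if (ht_node)
+ *           obj = caa_container_of(ht_node, struct mystruct, node);
+ */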
+
+/* cds_lfht_iter: Used to track state while traversing a hash chain. */
+struct cds_lfht_iter {
+       struct cds_lfht_node *node, *next;
+};
+
+static inline
+struct cds_lfht_node *cds_lfht_iter_get_node(struct cds_lfht_iter *iter)
+{
+       return iter->node;
+}
+
+struct cds_lfht;
+
+/*
+ * Caution !
+ * Ensure reader and writer threads are registered as urcu readers.
+ */
+
+typedef unsigned long (*cds_lfht_hash_fct)(void *key, size_t length,
+                                       unsigned long seed);
+typedef unsigned long (*cds_lfht_compare_fct)(void *key1, size_t key1_len,
+                                       void *key2, size_t key2_len);
+
+/*
+ * cds_lfht_node_init - initialize a hash table node
+ */
+static inline
+void cds_lfht_node_init(struct cds_lfht_node *node, void *key,
+                       size_t key_len)
+{
+       node->key = key;
+       node->key_len = key_len;
+}
+
+/*
+ * Hash table creation flags.
+ */
+enum {
+       CDS_LFHT_AUTO_RESIZE = (1U << 0),
+       CDS_LFHT_ACCOUNTING = (1U << 1),
+};
+
+/*
+ * _cds_lfht_new - API used by cds_lfht_new wrapper. Do not use directly.
+ */
+struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
+                       cds_lfht_compare_fct compare_fct,
+                       unsigned long hash_seed,
+                       unsigned long init_size,
+                       unsigned long min_alloc_size,
+                       int flags,
+                       void (*cds_lfht_call_rcu)(struct rcu_head *head,
+                               void (*func)(struct rcu_head *head)),
+                       void (*cds_lfht_synchronize_rcu)(void),
+                       void (*cds_lfht_rcu_read_lock)(void),
+                       void (*cds_lfht_rcu_read_unlock)(void),
+                       void (*cds_lfht_rcu_thread_offline)(void),
+                       void (*cds_lfht_rcu_thread_online)(void),
+                       void (*cds_lfht_rcu_register_thread)(void),
+                       void (*cds_lfht_rcu_unregister_thread)(void),
+                       pthread_attr_t *attr);
+
+/*
+ * cds_lfht_new - allocate a hash table.
+ * @hash_fct: the hashing function.
+ * @compare_fct: the key comparison function.
+ * @hash_seed: the seed for hash function.
+ * @init_size: number of nodes to allocate initially. Must be power of two.
+ * @min_alloc_size: the smallest allocation size to use. Must be power of two.
+ * @flags: hash table creation flags (can be combined with bitwise or: '|').
+ *           0: no flags.
+ *           CDS_LFHT_AUTO_RESIZE: automatically resize hash table.
+ *           CDS_LFHT_ACCOUNTING: count the number of hash table elements.
+ * @attr: optional resize worker thread attributes. NULL for default.
+ *
+ * Return NULL on error.
+ * Note: the RCU flavor must already be included before the hash table header.
+ *
+ * The programmer is responsible for ensuring that the resize operation
+ * runs at a priority equal to that of the hash table updater threads.
+ * This is done by specifying the appropriate priority in the pthread
+ * "attr" argument and, for CDS_LFHT_AUTO_RESIZE, by ensuring that the
+ * call_rcu worker threads also run at this priority level. Running the
+ * call_rcu and resize threads at a lower priority does not pose any
+ * correctness issue, but the resize operations could then be starved by
+ * updates, leading to long hash table bucket chains.
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+static inline
+struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+                       cds_lfht_compare_fct compare_fct,
+                       unsigned long hash_seed,
+                       unsigned long init_size,
+                       unsigned long min_alloc_size,
+                       int flags,
+                       pthread_attr_t *attr)
+{
+       return _cds_lfht_new(hash_fct, compare_fct, hash_seed,
+                       init_size, min_alloc_size, flags,
+                       call_rcu, synchronize_rcu, rcu_read_lock,
+                       rcu_read_unlock, rcu_thread_offline,
+                       rcu_thread_online, rcu_register_thread,
+                       rcu_unregister_thread, attr);
+}
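+
+/*
+ * Creation sketch, assuming my_hash and my_compare are caller-provided
+ * functions matching cds_lfht_hash_fct and cds_lfht_compare_fct, with
+ * an arbitrary seed; the calling thread must be a registered RCU
+ * reader:
+ *
+ *   struct cds_lfht *ht;
+ *
+ *   ht = cds_lfht_new(my_hash, my_compare, 0x42UL, 64, 1,
+ *                   CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+ *   if (!ht)
+ *           abort();
+ */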
+
+/*
+ * cds_lfht_destroy - destroy a hash table.
+ * @ht: the hash table to destroy.
+ * @attr: (output) resize worker thread attributes, as received by
+ *        cds_lfht_new. The caller will typically want to free this pointer
+ *        if it was dynamically allocated. The attr pointer can be NULL if
+ *        the caller does not need to be informed of the value passed to
+ *        cds_lfht_new().
+ *
+ * Return 0 on success, negative error value on error.
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr);
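+
+/*
+ * Usage sketch (illustrative only): destroying a table once no reader
+ * or updater references it anymore; the attr output is not needed
+ * here, so NULL is passed.
+ */
+#if 0
+static void teardown_table(struct cds_lfht *ht)
+{
+        if (cds_lfht_destroy(ht, NULL))
+                abort();        /* sketch: treat failure as fatal; assumes <stdlib.h> */
+}
+#endif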
+
+/*
+ * cds_lfht_count_nodes - count the number of nodes in the hash table.
+ * @ht: the hash table.
+ * @split_count_before: sample of the node count split-counter taken before
+ *        the traversal.
+ * @count: number of nodes observed while traversing the hash table.
+ * @removed: number of logically removed nodes observed during the traversal.
+ * @split_count_after: sample of the node count split-counter taken after
+ *        the traversal.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+void cds_lfht_count_nodes(struct cds_lfht *ht,
+               long *split_count_before,
+               unsigned long *count,
+               unsigned long *removed,
+               long *split_count_after);
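+
+/*
+ * Usage sketch (illustrative only): sampling the node count. If the
+ * before/after split-counter samples differ, updates ran concurrently
+ * with the traversal and the reported count is approximate.
+ */
+#if 0
+static void report_count(struct cds_lfht *ht)
+{
+        long before, after;
+        unsigned long count, removed;
+
+        rcu_read_lock();
+        cds_lfht_count_nodes(ht, &before, &count, &removed, &after);
+        rcu_read_unlock();
+        printf("%lu nodes (%lu logically removed)\n", count, removed);
+        /* assumes <stdio.h> */
+}
+#endif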
+
+/*
+ * cds_lfht_lookup - lookup a node by key.
+ *
+ * Output in "*iter". *iter->node set to NULL if not found.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
+               struct cds_lfht_iter *iter);
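+
+/*
+ * Usage sketch (illustrative only): looking up a key and converting
+ * the node back to the hypothetical enclosing "mynode" structure (see
+ * the sketch near cds_lfht_node_init) with caa_container_of().
+ */
+#if 0
+static struct mynode *find_key(struct cds_lfht *ht, unsigned long key)
+{
+        struct cds_lfht_iter iter;
+        struct cds_lfht_node *found;
+
+        /* caller holds rcu_read_lock() */
+        cds_lfht_lookup(ht, &key, sizeof(key), &iter);
+        found = cds_lfht_iter_get_node(&iter);
+        return found ? caa_container_of(found, struct mynode, node) : NULL;
+}
+#endif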
+
+/*
+ * cds_lfht_next_duplicate - get the next item with same key (after a lookup).
+ *
+ * Uses an iterator initialized by a lookup.
+ * Sets *iter->node to the following node with the same key.
+ * Sets *iter->node to NULL if no following node exists with the same key.
+ * The RCU read-side lock must be held across the cds_lfht_lookup and
+ * cds_lfht_next_duplicate calls, and also between cds_lfht_next_duplicate
+ * calls using the node returned by a previous cds_lfht_next_duplicate.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter);
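+
+/*
+ * Usage sketch (illustrative only): walking every node that shares a
+ * given key.
+ */
+#if 0
+static unsigned long count_duplicates(struct cds_lfht *ht,
+                unsigned long key)
+{
+        struct cds_lfht_iter iter;
+        unsigned long n = 0;
+
+        rcu_read_lock();
+        cds_lfht_lookup(ht, &key, sizeof(key), &iter);
+        while (cds_lfht_iter_get_node(&iter)) {
+                n++;
+                cds_lfht_next_duplicate(ht, &iter);
+        }
+        rcu_read_unlock();
+        return n;
+}
+#endif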
+
+/*
+ * cds_lfht_first - get the first node in the table.
+ *
+ * Output in "*iter". *iter->node set to NULL if the table is empty.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter);
+
+/*
+ * cds_lfht_next - get the next node in the table.
+ *
+ * Input/Output in "*iter". *iter->node set to NULL if *iter was
+ * pointing to the last table node.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter);
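+
+/*
+ * Usage sketch (illustrative only): traversing the whole table under
+ * the RCU read-side lock.
+ */
+#if 0
+static unsigned long walk_table(struct cds_lfht *ht)
+{
+        struct cds_lfht_iter iter;
+        unsigned long n = 0;
+
+        rcu_read_lock();
+        for (cds_lfht_first(ht, &iter); cds_lfht_iter_get_node(&iter);
+                        cds_lfht_next(ht, &iter))
+                n++;
+        rcu_read_unlock();
+        return n;
+}
+#endif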
+
+/*
+ * cds_lfht_add - add a node to the hash table.
+ *
+ * This function supports adding duplicate keys to the table.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node);
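+
+/*
+ * Usage sketch (illustrative only): allocating and inserting a node,
+ * using the hypothetical "mynode" structure sketched near
+ * cds_lfht_node_init.
+ */
+#if 0
+static int insert_key(struct cds_lfht *ht, unsigned long key)
+{
+        struct mynode *n;
+
+        n = malloc(sizeof(*n));                 /* assumes <stdlib.h> */
+        if (!n)
+                return -1;
+        n->key = key;
+        cds_lfht_node_init(&n->node, &n->key, sizeof(n->key));
+        rcu_read_lock();
+        cds_lfht_add(ht, &n->node);
+        rcu_read_unlock();
+        return 0;
+}
+#endif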
+
+/*
+ * cds_lfht_add_unique - add a node to hash table, if key is not present.
+ *
+ * Return the node added upon success.
+ * Return the unique node already present upon failure. If
+ * cds_lfht_add_unique fails, the node passed as a parameter should be
+ * freed by the caller.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ *
+ * The semantic of this function is that if only this function is used
+ * to add keys into the table, no duplicated keys should ever be
+ * observable in the table. The same guarantee applies to combinations
+ * of add_unique and add_replace (see below).
+ */
+struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
+               struct cds_lfht_node *node);
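+
+/*
+ * Usage sketch (illustrative only): attempting a unique insertion and
+ * freeing the candidate node when the key was already present. The
+ * "mynode" structure is the hypothetical one sketched earlier.
+ */
+#if 0
+static int insert_unique(struct cds_lfht *ht, struct mynode *n)
+{
+        struct cds_lfht_node *ret;
+
+        rcu_read_lock();
+        ret = cds_lfht_add_unique(ht, &n->node);
+        rcu_read_unlock();
+        if (ret != &n->node) {
+                /* key already present: our node was not added */
+                free(n);
+                return -1;
+        }
+        return 0;
+}
+#endif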
+
+/*
+ * cds_lfht_add_replace - replace or add a node within hash table.
+ *
+ * Return the node replaced upon success. If no node matching the key
+ * was present, return NULL, which also means the operation succeeded.
+ * This replacement operation should never fail.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * After successful replacement, a grace period must be waited for before
+ * freeing the memory reserved for the returned node.
+ *
+ * The semantic of replacement vs lookups is the following: if lookups
+ * are performed between a key's unique insertion and its removal, we
+ * guarantee that lookups and get-next traversals will always find
+ * exactly one instance of the key, even if it is concurrently replaced.
+ *
+ * Providing this semantic allows us to ensure that replacement-only
+ * schemes will never generate duplicated keys. It also allows us to
+ * guarantee that a combination of add_replace and add_unique updates
+ * will never generate duplicated keys.
+ */
+struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
+               struct cds_lfht_node *node);
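+
+/*
+ * Usage sketch (illustrative only): replacing (or adding) a key and
+ * deferring reclaim of the displaced node with call_rcu(). Assumes the
+ * hypothetical "mynode" structure also embeds a struct rcu_head named
+ * "head".
+ */
+#if 0
+static void free_mynode_rcu(struct rcu_head *head)
+{
+        free(caa_container_of(head, struct mynode, head));
+}
+
+static void upsert(struct cds_lfht *ht, struct mynode *n)
+{
+        struct cds_lfht_node *old;
+
+        rcu_read_lock();
+        old = cds_lfht_add_replace(ht, &n->node);
+        rcu_read_unlock();
+        if (old)        /* free displaced node after a grace period */
+                call_rcu(&caa_container_of(old, struct mynode, node)->head,
+                        free_mynode_rcu);
+}
+#endif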
+
+/*
+ * cds_lfht_replace - replace a node pointed to by iter within the hash table.
+ *
+ * Return 0 if replacement is successful, negative value otherwise.
+ * Replacing a NULL old node or an already removed node will fail with a
+ * negative value.
+ * The old node can be looked up with cds_lfht_lookup and cds_lfht_next.
+ * The RCU read-side lock must be held between lookup and replacement.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * After successful replacement, a grace period must be waited for before
+ * freeing the memory reserved for the old node (which can be accessed
+ * with cds_lfht_iter_get_node).
+ *
+ * The semantic of replacement vs lookups is the following: if lookups
+ * are performed between a key's unique insertion and its removal, we
+ * guarantee that lookups and get-next traversals will always find
+ * exactly one instance of the key, even if it is concurrently replaced.
+ *
+ * Providing this semantic allows us to ensure that replacement-only
+ * schemes will never generate duplicated keys. It also allows us to
+ * guarantee that a combination of add_replace and add_unique updates
+ * will never generate duplicated keys.
+ */
+int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
+               struct cds_lfht_node *new_node);
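+
+/*
+ * Usage sketch (illustrative only): replacing the node an iterator
+ * points to within a single read-side critical section. The new node
+ * is assumed to have been initialized with cds_lfht_node_init.
+ */
+#if 0
+static int swap_node(struct cds_lfht *ht, unsigned long key,
+                struct mynode *new_node)
+{
+        struct cds_lfht_iter iter;
+        int ret;
+
+        rcu_read_lock();
+        cds_lfht_lookup(ht, &key, sizeof(key), &iter);
+        ret = cds_lfht_replace(ht, &iter, &new_node->node);
+        rcu_read_unlock();
+        /* on success, reclaim the old node only after a grace period */
+        return ret;
+}
+#endif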
+
+/*
+ * cds_lfht_del - remove node pointed to by iterator from hash table.
+ *
+ * Return 0 if the node is successfully removed, negative value
+ * otherwise.
+ * Deleting a NULL node or an already removed node will fail with a
+ * negative value.
+ * The node can be looked up with cds_lfht_lookup and cds_lfht_next,
+ * and accessed through cds_lfht_iter_get_node.
+ * The RCU read-side lock must be held between lookup and removal.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * After successful removal, a grace period must be waited for before
+ * freeing the memory reserved for old node (which can be accessed with
+ * cds_lfht_iter_get_node).
+ */
+int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter);
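+
+/*
+ * Usage sketch (illustrative only): removing a key and deferring
+ * reclaim past a grace period, reusing the hypothetical "mynode" and
+ * free_mynode_rcu definitions sketched above.
+ */
+#if 0
+static int remove_key(struct cds_lfht *ht, unsigned long key)
+{
+        struct cds_lfht_iter iter;
+        struct cds_lfht_node *hnode;
+        int ret = -1;
+
+        rcu_read_lock();
+        cds_lfht_lookup(ht, &key, sizeof(key), &iter);
+        hnode = cds_lfht_iter_get_node(&iter);
+        if (hnode) {
+                ret = cds_lfht_del(ht, &iter);
+                if (!ret)
+                        call_rcu(&caa_container_of(hnode, struct mynode,
+                                        node)->head, free_mynode_rcu);
+        }
+        rcu_read_unlock();
+        return ret;
+}
+#endif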
+
+/*
+ * cds_lfht_resize - Force a hash table resize.
+ * @new_size: resize the hash table to this size.
+ *
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size);
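+
+/*
+ * Usage sketch (illustrative only): forcing growth ahead of a known
+ * insertion burst; the target size is arbitrary.
+ */
+#if 0
+static void grow_table(struct cds_lfht *ht)
+{
+        cds_lfht_resize(ht, 4096);
+}
+#endif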
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_RCULFHASH_H */