/*
* rculfhash.c
*
- * Userspace RCU library - Lock-Free Expandable RCU Hash Table
+ * Userspace RCU library - Lock-Free Resizable RCU Hash Table
*
* Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/*
+ * Based on the following articles:
+ * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
+ * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
+ * - Michael, M. M. High performance dynamic lock-free hash tables
+ * and list-based sets. In Proceedings of the fourteenth annual ACM
+ * symposium on Parallel algorithms and architectures, ACM Press,
+ * (2002), 73-82.
+ *
+ * Some specificities of this Lock-Free Resizable RCU Hash Table
+ * implementation:
+ *
+ * - An RCU read-side critical section allows readers to perform hash
+ * table lookups and use the returned objects safely by delaying
+ * memory reclaim by a grace period.
+ * - Add and remove operations are lock-free, and do not need to
+ * allocate memory. They need to be executed within an RCU read-side
+ * critical section to ensure the objects they read are valid and to
+ * deal with the cmpxchg ABA problem.
+ * - add and add_unique operations are supported. add_unique checks
+ * whether the node key is already present in the hash table, and
+ * ensures that no duplicate key exists.
+ * - The resize operation executes concurrently with add/remove/lookup.
+ * - Hash table nodes are contained within a split-ordered list. This
+ * list is ordered by incrementing reversed-bits-hash value.
+ * - An index of dummy nodes is kept. These dummy nodes are the hash
+ * table "buckets", and they are also chained together in the
+ * split-ordered list, which allows recursive expansion.
+ * - The resize operation for small tables only allows expanding the hash table.
+ * It is triggered automatically by detecting long chains in the add
+ * operation.
+ * - The resize operation for larger tables (also available through the
+ * API) allows both expanding and shrinking the hash table.
+ * - Per-CPU split-counters are used to keep track of the number of
+ * nodes within the hash table for automatic resize triggering.
+ * - A resize operation initiated by long-chain detection is executed by
+ * a call_rcu thread, which preserves the lock-freedom of add and remove.
+ * - Resize operations are protected by a mutex.
+ * - The removal operation is split in two parts: first, a "removed"
+ * flag is set in the next pointer within the node to remove. Then,
+ * a "garbage collection" is performed in the bucket containing the
+ * removed node (from the start of the bucket up to the removed node).
+ * All encountered nodes with "removed" flag set in their next
+ * pointers are removed from the linked-list. If the cmpxchg used for
+ * removal fails (due to concurrent garbage-collection or concurrent
+ * add), we retry from the beginning of the bucket. This ensures that
+ * the node with "removed" flag set is removed from the hash table
+ * (not visible to lookups anymore) before the RCU read-side critical
+ * section held across removal ends. Furthermore, this ensures that
+ * the node with "removed" flag set is removed from the linked-list
+ * before its memory is reclaimed. Only the thread whose cmpxchg
+ * successfully set the "removed" flag into a node's next pointer is
+ * considered to have succeeded in its removal (and thus owns the node
+ * to reclaim). Because we garbage-collect starting from
+ * an invariant node (the start-of-bucket dummy node) up to the
+ * "removed" node (or find a reverse-hash that is higher), we are sure
+ * that a successful traversal of the chain leads to a chain that is
+ * present in the linked-list (the start node is never removed) and
+ * that it does not contain the "removed" node anymore, even if
+ * concurrent delete/add operations are changing the structure of the
+ * list concurrently.
+ * - The add operation performs garbage collection of buckets if it
+ * encounters nodes with the removed flag set in the bucket where it
+ * wants to add its new node. This ensures lock-freedom of the add
+ * operation by helping the remover unlink nodes from the list rather
+ * than waiting for it to do so.
+ * - An RCU "order table" indexed by log2(hash index) is copied and
+ * expanded by the resize operation. This order table allows finding
+ * the "dummy node" tables.
+ * - There is one dummy node table per hash index order. The size of
+ * each dummy node table is half the number of hashes contained in
+ * this order.
+ * - call_rcu is used to garbage-collect the old order table.
+ * - The per-order dummy node tables contain a compact version of the
+ * hash table nodes. These tables are invariant after they are
+ * populated into the hash table.
+ *
+ * A bit of ascii art explanation:
+ *
+ * The order index is off by one compared to the actual power of 2,
+ * because we use index 0 to deal with the 0 special case.
+ *
+ * This shows the nodes for a small table ordered by reversed bits:
+ *
+ * bits reverse
+ * 0 000 000
+ * 4 100 001
+ * 2 010 010
+ * 6 110 011
+ * 1 001 100
+ * 5 101 101
+ * 3 011 110
+ * 7 111 111
+ *
+ * This shows the nodes in order of non-reversed bits, linked by
+ * reversed-bit order.
+ *
+ * order bits reverse
+ * 0 0 000 000
+ * |
+ * 1 | 1 001 100 <-
+ * | | |
+ * 2 | | 2 010 010 |
+ * | | | 3 011 110 <- |
+ * | | | | | |
+ * 3 -> | | | 4 100 001 | |
+ * -> | | 5 101 101 |
+ * -> | 6 110 011
+ * -> 7 111 111
+ */
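+
+/*
+ * A worked lookup example (illustration only; the values follow from
+ * the index arithmetic used below): for a table of size 8, a hash whose
+ * masked index is 5 gives order = get_count_order_ulong(5 + 1) = 3, so
+ * the dummy node heading its bucket is
+ * t->tbl[3]->nodes[5 & ((1UL << 2) - 1)] = t->tbl[3]->nodes[1],
+ * i.e. the second entry of the 4-entry order-3 dummy node table.
+ */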
+
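+/*
+ * Illustrative usage sketch (not part of the implementation). It
+ * assumes a caller-defined struct mynode embedding a cds_lfht_node, a
+ * caller-provided test_hash()/test_compare() pair matching the
+ * cds_lfht_hash_fct/cds_lfht_compare_fct signatures, a free_mynode_cb()
+ * call_rcu callback, and the cds_lfht_node_init() initializer declared
+ * in rculfhash.h:
+ *
+ *	struct mynode {
+ *		int value;			(lookup key)
+ *		struct cds_lfht_node node;	(chaining in hash table)
+ *		struct rcu_head head;		(for call_rcu() reclaim)
+ *	};
+ *
+ *	ht = cds_lfht_new(test_hash, test_compare, 0x42UL,
+ *			  1, call_rcu, synchronize_rcu);
+ *
+ *	(add, lookup and remove run in read-side critical sections)
+ *	rcu_read_lock();
+ *	cds_lfht_node_init(&n->node, &n->value, sizeof(n->value));
+ *	cds_lfht_add(ht, &n->node);
+ *	rcu_read_unlock();
+ *
+ *	rcu_read_lock();
+ *	found = cds_lfht_lookup(ht, &key, sizeof(key));
+ *	if (found)
+ *		m = caa_container_of(found, struct mynode, node);
+ *	rcu_read_unlock();
+ *
+ *	rcu_read_lock();
+ *	if (!cds_lfht_remove(ht, &n->node))
+ *		call_rcu(&n->head, free_mynode_cb);
+ *	rcu_read_unlock();
+ */
+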
#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
+#include "config.h"
#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <stdio.h>
#include <pthread.h>
-//#define DEBUG /* Test */
-
#ifdef DEBUG
-#define dbg_printf(args...) printf(args)
+#define dbg_printf(fmt, args...) printf("[debug rculfhash] " fmt, ## args)
#else
-#define dbg_printf(args...)
+#define dbg_printf(fmt, args...)
#endif
-#define CHAIN_LEN_TARGET 4
-#define CHAIN_LEN_RESIZE_THRESHOLD 8
+/*
+ * Per-CPU split-counters lazily update the global counter every 1024
+ * additions/removals. They automatically keep track of when a resize
+ * is required. We use the bucket chain length as the expand indicator
+ * for small tables and for machines lacking per-cpu data support.
+ */
+#define COUNT_COMMIT_ORDER 10
+#define CHAIN_LEN_TARGET 1
+#define CHAIN_LEN_RESIZE_THRESHOLD 3
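+
+/*
+ * A quick worked example (illustration only): each per-cpu counter
+ * folds 1UL << COUNT_COMMIT_ORDER = 1024 items at a time into the
+ * global ht->count. For a table of 1024 buckets, once the global count
+ * reaches the power of two 8192, we have
+ * 8192 >> CHAIN_LEN_RESIZE_THRESHOLD = 1024 >= size, so a lazy resize
+ * to 8192 >> (CHAIN_LEN_TARGET - 1) = 8192 buckets is requested,
+ * i.e. roughly one node per bucket on average.
+ */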
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#define DUMMY_FLAG (1UL << 1)
#define FLAGS_MASK ((1UL << 2) - 1)
+struct ht_items_count {
+ unsigned long add, remove;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+struct rcu_level {
+ struct rcu_head head;
+ struct _cds_lfht_node nodes[0];
+};
+
struct rcu_table {
unsigned long size; /* always a power of 2 */
unsigned long resize_target;
int resize_initiated;
struct rcu_head head;
- struct _rcu_ht_node *tbl[0];
+ struct rcu_level *tbl[0];
};
-struct rcu_ht {
+struct cds_lfht {
struct rcu_table *t; /* shared */
- ht_hash_fct hash_fct;
- ht_compare_fct compare_fct;
+ cds_lfht_hash_fct hash_fct;
+ cds_lfht_compare_fct compare_fct;
unsigned long hash_seed;
pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
- unsigned int in_progress_resize;
- void (*ht_call_rcu)(struct rcu_head *head,
+ unsigned int in_progress_resize, in_progress_destroy;
+ void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head));
+ void (*cds_lfht_synchronize_rcu)(void);
+ unsigned long count; /* global approximate item count */
+ struct ht_items_count *percpu_count; /* per-cpu item count */
};
struct rcu_resize_work {
struct rcu_head head;
- struct rcu_ht *ht;
+ struct cds_lfht *ht;
};
/*
}
static
-void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
+void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
+
+/*
+ * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
+ * available, then we support hash table item accounting.
+ * In the unfortunate event that the reported number of CPUs is
+ * inaccurate, we use modulo arithmetic on the number of CPUs we got.
+ */
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+ unsigned long count);
+
+static long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+ struct ht_items_count *count;
+
+ switch (nr_cpus_mask) {
+ case -2:
+ return NULL;
+ case -1:
+ {
+ long maxcpus;
+
+ maxcpus = sysconf(_SC_NPROCESSORS_CONF);
+ if (maxcpus <= 0) {
+ nr_cpus_mask = -2;
+ return NULL;
+ }
+ /*
+ * round up number of CPUs to next power of two, so we
+ * can use & for modulo.
+ */
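+ /*
+ * e.g. (illustration): 6 configured CPUs round up to
+ * maxcpus = 8, so nr_cpus_mask = 7 and "cpu & nr_cpus_mask"
+ * stands in for "cpu % 8".
+ */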
+ maxcpus = 1UL << get_count_order_ulong(maxcpus);
+ nr_cpus_mask = maxcpus - 1;
+ }
+ /* Fall-through */
+ default:
+ return calloc(nr_cpus_mask + 1, sizeof(*count));
+ }
+}
+
+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+ free(count);
+}
+
+static
+int ht_get_cpu(void)
+{
+ int cpu;
+
+ assert(nr_cpus_mask >= 0);
+ cpu = sched_getcpu();
+ if (unlikely(cpu < 0))
+ return cpu;
+ else
+ return cpu & nr_cpus_mask;
+}
+
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+ unsigned long percpu_count;
+ int cpu;
+
+ if (unlikely(!ht->percpu_count))
+ return;
+ cpu = ht_get_cpu();
+ if (unlikely(cpu < 0))
+ return;
+ percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
+ if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+ unsigned long count;
+
+ dbg_printf("add percpu %lu\n", percpu_count);
+ count = uatomic_add_return(&ht->count,
+ 1UL << COUNT_COMMIT_ORDER);
+ /* If power of 2 */
+ if (!(count & (count - 1))) {
+ if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
+ < t->size)
+ return;
+ dbg_printf("add set global %lu\n", count);
+ cds_lfht_resize_lazy_count(ht, t,
+ count >> (CHAIN_LEN_TARGET - 1));
+ }
+ }
+}
+
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+ unsigned long percpu_count;
+ int cpu;
+
+ if (unlikely(!ht->percpu_count))
+ return;
+ cpu = ht_get_cpu();
+ if (unlikely(cpu < 0))
+ return;
+ percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
+ if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+ unsigned long count;
+
+ dbg_printf("remove percpu %lu\n", percpu_count);
+ count = uatomic_add_return(&ht->count,
+ -(1UL << COUNT_COMMIT_ORDER));
+ /* If power of 2 */
+ if (!(count & (count - 1))) {
+ if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
+ >= t->size)
+ return;
+ dbg_printf("remove set global %lu\n", count);
+ cds_lfht_resize_lazy_count(ht, t,
+ count >> (CHAIN_LEN_TARGET - 1));
+ }
+ }
+}
+
+#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
+static const long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+ return NULL;
+}
+
+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+}
+
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
static
-void check_resize(struct rcu_ht *ht, struct rcu_table *t,
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
+
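+/*
+ * Worked example (illustration only): with CHAIN_LEN_TARGET == 1 and
+ * CHAIN_LEN_RESIZE_THRESHOLD == 3, an add operation that walks a bucket
+ * chain of length 3 computes a growth order of
+ * get_count_order_u32(3) == 2, i.e. it lazily requests a table four
+ * times larger.
+ */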
+static
+void check_resize(struct cds_lfht *ht, struct rcu_table *t,
uint32_t chain_len)
{
+ unsigned long count;
+
+ count = uatomic_read(&ht->count);
+ /*
+ * Use the bucket-local chain length as the expand indicator for
+ * small tables and for environments lacking per-cpu data support.
+ */
+ if (count >= (1UL << COUNT_COMMIT_ORDER))
+ return;
if (chain_len > 100)
- dbg_printf("rculfhash: WARNING: large chain length: %u.\n",
+ dbg_printf("WARNING: large chain length: %u.\n",
chain_len);
if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
- ht_resize_lazy(ht, t,
+ cds_lfht_resize_lazy(ht, t,
get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}
static
-struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
+struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
- return (struct rcu_ht_node *) (((unsigned long) node) & ~FLAGS_MASK);
+ return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}
static
-int is_removed(struct rcu_ht_node *node)
+int is_removed(struct cds_lfht_node *node)
{
return ((unsigned long) node) & REMOVED_FLAG;
}
static
-struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
+struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
- return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
+ return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}
static
-int is_dummy(struct rcu_ht_node *node)
+int is_dummy(struct cds_lfht_node *node)
{
return ((unsigned long) node) & DUMMY_FLAG;
}
static
-struct rcu_ht_node *flag_dummy(struct rcu_ht_node *node)
+struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
{
- return (struct rcu_ht_node *) (((unsigned long) node) | DUMMY_FLAG);
+ return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}
static
return v;
}
+static
+void cds_lfht_free_table_cb(struct rcu_head *head)
+{
+ struct rcu_table *t =
+ caa_container_of(head, struct rcu_table, head);
+ free(t);
+}
+
+static
+void cds_lfht_free_level(struct rcu_head *head)
+{
+ struct rcu_level *l =
+ caa_container_of(head, struct rcu_level, head);
+ free(l);
+}
+
/*
* Remove all logically deleted nodes from a bucket up to a certain node key.
*/
static
-void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
+void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
{
- struct rcu_ht_node *iter_prev, *iter, *next, *new_next;
+ struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
for (;;) {
iter_prev = dummy;
for (;;) {
if (unlikely(!clear_flag(iter)))
return;
- if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
+ if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
return;
next = rcu_dereference(clear_flag(iter)->p.next);
- if (is_removed(next))
+ if (likely(is_removed(next)))
break;
iter_prev = clear_flag(iter);
iter = next;
}
static
-struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
- struct rcu_ht_node *node, int unique, int dummy)
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
+ struct cds_lfht_node *node, int unique, int dummy)
{
- struct rcu_ht_node *iter_prev, *iter, *next, *new_node, *new_next,
+ struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
*dummy_node;
- struct _rcu_ht_node *lookup;
+ struct _cds_lfht_node *lookup;
unsigned long hash, index, order;
if (!t->size) {
*/
index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
- lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
- iter_prev = (struct rcu_ht_node *) lookup;
+ lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+ iter_prev = (struct cds_lfht_node *) lookup;
/* We can always skip the dummy node initially */
iter = rcu_dereference(iter_prev->p.next);
assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
for (;;) {
if (unlikely(!clear_flag(iter)))
goto insert;
- if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
+ if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
goto insert;
next = rcu_dereference(clear_flag(iter)->p.next);
- if (is_removed(next))
+ if (unlikely(is_removed(next)))
goto gc_node;
if (unique
&& !is_dummy(next)
/* Garbage collect logically removed nodes in the bucket */
index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
- lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
- dummy_node = (struct rcu_ht_node *) lookup;
- _ht_gc_bucket(dummy_node, node);
+ lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+ dummy_node = (struct cds_lfht_node *) lookup;
+ _cds_lfht_gc_bucket(dummy_node, node);
return node;
}
static
-int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
+int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
+ struct cds_lfht_node *node, int dummy_removal)
{
- struct rcu_ht_node *dummy, *next, *old;
- struct _rcu_ht_node *lookup;
+ struct cds_lfht_node *dummy, *next, *old;
+ struct _cds_lfht_node *lookup;
int flagged = 0;
unsigned long hash, index, order;
old = rcu_dereference(node->p.next);
do {
next = old;
- if (is_removed(next))
+ if (unlikely(is_removed(next)))
goto end;
- assert(!is_dummy(next));
+ if (dummy_removal)
+ assert(is_dummy(next));
+ else
+ assert(!is_dummy(next));
old = uatomic_cmpxchg(&node->p.next, next,
flag_removed(next));
} while (old != next);
/* We performed the (logical) deletion. */
flagged = 1;
+ if (dummy_removal)
+ node = clear_flag(node);
+
/*
* Ensure that the node is not visible to readers anymore: lookup for
* the node, and remove it (along with any other logically removed node)
* if found.
*/
hash = bit_reverse_ulong(node->p.reverse_hash);
+ assert(t->size > 0);
index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
- lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
- dummy = (struct rcu_ht_node *) lookup;
- _ht_gc_bucket(dummy, node);
+ lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+ dummy = (struct cds_lfht_node *) lookup;
+ _cds_lfht_gc_bucket(dummy, node);
end:
/*
* Only the flagging action indicated that we (and no other)
}
static
-void init_table(struct rcu_ht *ht, struct rcu_table *t,
+void init_table(struct cds_lfht *ht, struct rcu_table *t,
unsigned long first_order, unsigned long len_order)
{
unsigned long i, end_order;
- dbg_printf("rculfhash: init table: first_order %lu end_order %lu\n",
+ dbg_printf("init table: first_order %lu end_order %lu\n",
first_order, first_order + len_order);
end_order = first_order + len_order;
t->size = !first_order ? 0 : (1UL << (first_order - 1));
unsigned long j, len;
len = !i ? 1 : 1UL << (i - 1);
- dbg_printf("rculfhash: init order %lu len: %lu\n", i, len);
- t->tbl[i] = calloc(len, sizeof(struct _rcu_ht_node));
+ dbg_printf("init order %lu len: %lu\n", i, len);
+ t->tbl[i] = calloc(1, sizeof(struct rcu_level)
+ + (len * sizeof(struct _cds_lfht_node)));
for (j = 0; j < len; j++) {
- dbg_printf("rculfhash: init entry: i %lu j %lu hash %lu\n",
+ struct cds_lfht_node *new_node =
+ (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
+
+ dbg_printf("init entry: i %lu j %lu hash %lu\n",
i, j, !i ? 0 : (1UL << (i - 1)) + j);
- struct rcu_ht_node *new_node =
- (struct rcu_ht_node *) &t->tbl[i][j];
new_node->p.reverse_hash =
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
- (void) _ht_add(ht, t, new_node, 0, 1);
+ (void) _cds_lfht_add(ht, t, new_node, 0, 1);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
}
/* Update table size */
t->size = !i ? 1 : (1UL << i);
- dbg_printf("rculfhash: init new size: %lu\n", t->size);
+ dbg_printf("init new size: %lu\n", t->size);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
+ }
+ t->resize_target = t->size;
+ t->resize_initiated = 0;
+}
+
+static
+void fini_table(struct cds_lfht *ht, struct rcu_table *t,
+ unsigned long first_order, unsigned long len_order)
+{
+ long i, end_order;
+
+ dbg_printf("fini table: first_order %lu end_order %lu\n",
+ first_order, first_order + len_order);
+ end_order = first_order + len_order;
+ assert(first_order > 0);
+ assert(t->size == (1UL << (end_order - 1)));
+ for (i = end_order - 1; i >= first_order; i--) {
+ unsigned long j, len;
+
+ len = !i ? 1 : 1UL << (i - 1);
+ dbg_printf("fini order %lu len: %lu\n", i, len);
+ /* Update table size */
+ t->size = 1UL << (i - 1);
+ /* Unlink */
+ for (j = 0; j < len; j++) {
+ struct cds_lfht_node *new_node =
+ (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
+
+ dbg_printf("fini entry: i %lu j %lu hash %lu\n",
+ i, j, !i ? 0 : (1UL << (i - 1)) + j);
+ new_node->p.reverse_hash =
+ bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+ (void) _cds_lfht_remove(ht, t, new_node, 1);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
+ }
+ ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
+ dbg_printf("fini new size: %lu\n", t->size);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
}
t->resize_target = t->size;
t->resize_initiated = 0;
}
-struct rcu_ht *ht_new(ht_hash_fct hash_fct,
- ht_compare_fct compare_fct,
- unsigned long hash_seed,
- unsigned long init_size,
- void (*ht_call_rcu)(struct rcu_head *head,
- void (*func)(struct rcu_head *head)))
+struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+ cds_lfht_compare_fct compare_fct,
+ unsigned long hash_seed,
+ unsigned long init_size,
+ void (*cds_lfht_call_rcu)(struct rcu_head *head,
+ void (*func)(struct rcu_head *head)),
+ void (*cds_lfht_synchronize_rcu)(void))
{
- struct rcu_ht *ht;
+ struct cds_lfht *ht;
unsigned long order;
- ht = calloc(1, sizeof(struct rcu_ht));
+ /* init_size must be power of two */
+ if (init_size && (init_size & (init_size - 1)))
+ return NULL;
+ ht = calloc(1, sizeof(struct cds_lfht));
ht->hash_fct = hash_fct;
ht->compare_fct = compare_fct;
ht->hash_seed = hash_seed;
- ht->ht_call_rcu = ht_call_rcu;
+ ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
+ ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
ht->in_progress_resize = 0;
+ ht->percpu_count = alloc_per_cpu_items_count();
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
order = get_count_order_ulong(max(init_size, 1)) + 1;
- ht->t = calloc(1, sizeof(struct rcu_table)
- + (order * sizeof(struct _rcu_ht_node *)));
+ ht->t = calloc(1, sizeof(struct rcu_table)
+ + (order * sizeof(struct rcu_level *)));
ht->t->size = 0;
pthread_mutex_lock(&ht->resize_mutex);
init_table(ht, ht->t, 0, order);
return ht;
}
-struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
+struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
{
struct rcu_table *t;
- struct rcu_ht_node *node, *next;
- struct _rcu_ht_node *lookup;
+ struct cds_lfht_node *node, *next;
+ struct _cds_lfht_node *lookup;
unsigned long hash, reverse_hash, index, order;
hash = ht->hash_fct(key, key_len, ht->hash_seed);
t = rcu_dereference(ht->t);
index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
- lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
- dbg_printf("rculfhash: lookup hash %lu index %lu order %lu aridx %lu\n",
- hash, index, order, index & ((1UL << (order - 1)) - 1));
- node = (struct rcu_ht_node *) lookup;
+ lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+ dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
+ hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
+ node = (struct cds_lfht_node *) lookup;
for (;;) {
if (unlikely(!node))
break;
return node;
}
-void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
+struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
+ struct cds_lfht_node *node)
+{
+ struct cds_lfht_node *next;
+ unsigned long reverse_hash;
+ void *key;
+ size_t key_len;
+
+ reverse_hash = node->p.reverse_hash;
+ key = node->key;
+ key_len = node->key_len;
+ next = rcu_dereference(node->p.next);
+ node = clear_flag(next);
+
+ for (;;) {
+ if (unlikely(!node))
+ break;
+ if (unlikely(node->p.reverse_hash > reverse_hash)) {
+ node = NULL;
+ break;
+ }
+ next = rcu_dereference(node->p.next);
+ if (likely(!is_removed(next))
+ && !is_dummy(next)
+ && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+ break;
+ }
+ node = clear_flag(next);
+ }
+ assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ return node;
+}
+
+void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
struct rcu_table *t;
unsigned long hash;
node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
t = rcu_dereference(ht->t);
- (void) _ht_add(ht, t, node, 0, 0);
+ (void) _cds_lfht_add(ht, t, node, 0, 0);
+ ht_count_add(ht, t);
}
-struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
+struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
+ struct cds_lfht_node *node)
{
struct rcu_table *t;
unsigned long hash;
+ struct cds_lfht_node *ret;
hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
t = rcu_dereference(ht->t);
- return _ht_add(ht, t, node, 1, 0);
+ ret = _cds_lfht_add(ht, t, node, 1, 0);
+ if (ret != node)
+ ht_count_add(ht, t);
+ return ret;
}
-int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
+int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
{
struct rcu_table *t;
+ int ret;
t = rcu_dereference(ht->t);
- return _ht_remove(ht, t, node);
+ ret = _cds_lfht_remove(ht, t, node, 0);
+ if (!ret)
+ ht_count_remove(ht, t);
+ return ret;
}
static
-int ht_delete_dummy(struct rcu_ht *ht)
+int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
struct rcu_table *t;
- struct rcu_ht_node *node;
- struct _rcu_ht_node *lookup;
+ struct cds_lfht_node *node;
+ struct _cds_lfht_node *lookup;
unsigned long order, i;
t = ht->t;
/* Check that the table is empty */
- lookup = &t->tbl[0][0];
- node = (struct rcu_ht_node *) lookup;
+ lookup = &t->tbl[0]->nodes[0];
+ node = (struct cds_lfht_node *) lookup;
do {
node = clear_flag(node)->p.next;
if (!is_dummy(node))
len = !order ? 1 : 1UL << (order - 1);
for (i = 0; i < len; i++) {
- dbg_printf("rculfhash: delete order %lu i %lu hash %lu\n",
+ dbg_printf("delete order %lu i %lu hash %lu\n",
order, i,
- bit_reverse_ulong(t->tbl[order][i].reverse_hash));
- assert(is_dummy(t->tbl[order][i].next));
+ bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
+ assert(is_dummy(t->tbl[order]->nodes[i].next));
}
free(t->tbl[order]);
}
* Should only be called when no more concurrent readers nor writers can
* possibly access the table.
*/
-int ht_destroy(struct rcu_ht *ht)
+int cds_lfht_destroy(struct cds_lfht *ht)
{
int ret;
/* Wait for in-flight resize operations to complete */
+ CMM_STORE_SHARED(ht->in_progress_destroy, 1);
while (uatomic_read(&ht->in_progress_resize))
poll(NULL, 0, 100); /* wait for 100ms */
- ret = ht_delete_dummy(ht);
+ ret = cds_lfht_delete_dummy(ht);
if (ret)
return ret;
free(ht->t);
+ free_per_cpu_items_count(ht->percpu_count);
free(ht);
return ret;
}
-void ht_count_nodes(struct rcu_ht *ht,
+void cds_lfht_count_nodes(struct cds_lfht *ht,
unsigned long *count,
unsigned long *removed)
{
struct rcu_table *t;
- struct rcu_ht_node *node, *next;
- struct _rcu_ht_node *lookup;
+ struct cds_lfht_node *node, *next;
+ struct _cds_lfht_node *lookup;
unsigned long nr_dummy = 0;
*count = 0;
t = rcu_dereference(ht->t);
/* Count non-dummy nodes in the table */
- lookup = &t->tbl[0][0];
- node = (struct rcu_ht_node *) lookup;
+ lookup = &t->tbl[0]->nodes[0];
+ node = (struct cds_lfht_node *) lookup;
do {
next = rcu_dereference(node->p.next);
if (is_removed(next)) {
(nr_dummy)++;
node = clear_flag(next);
} while (node);
- dbg_printf("rculfhash: number of dummy nodes: %lu\n", nr_dummy);
+ dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
}
+/* called with resize mutex held */
static
-void ht_free_table_cb(struct rcu_head *head)
+void _do_cds_lfht_grow(struct cds_lfht *ht, struct rcu_table *old_t,
+ unsigned long old_size, unsigned long new_size)
{
- struct rcu_table *t =
- caa_container_of(head, struct rcu_table, head);
- free(t);
+ unsigned long old_order, new_order;
+ struct rcu_table *new_t;
+
+ old_order = get_count_order_ulong(old_size) + 1;
+ new_order = get_count_order_ulong(new_size) + 1;
+ dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+ old_size, old_order, new_size, new_order);
+ new_t = malloc(sizeof(struct rcu_table)
+ + (new_order * sizeof(struct rcu_level *)));
+ assert(new_size > old_size);
+ memcpy(&new_t->tbl, &old_t->tbl,
+ old_order * sizeof(struct rcu_level *));
+ init_table(ht, new_t, old_order, new_order - old_order);
+ /* Changing table and size atomically wrt lookups */
+ rcu_assign_pointer(ht->t, new_t);
+ ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
}
/* called with resize mutex held */
static
-void _do_ht_resize(struct rcu_ht *ht)
+void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
+ unsigned long old_size, unsigned long new_size)
{
- unsigned long new_size, old_size, old_order, new_order;
- struct rcu_table *new_t, *old_t;
+ unsigned long old_order, new_order;
+ struct rcu_table *new_t;
- old_t = ht->t;
- old_size = old_t->size;
+ new_size = max(new_size, 1);
old_order = get_count_order_ulong(old_size) + 1;
-
- new_size = CMM_LOAD_SHARED(old_t->resize_target);
- if (old_size == new_size)
- return;
new_order = get_count_order_ulong(new_size) + 1;
- printf("rculfhash: resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+ dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
- new_t = malloc(sizeof(struct rcu_table)
- + (new_order * sizeof(struct _rcu_ht_node *)));
- assert(new_size > old_size);
+ new_t = malloc(sizeof(struct rcu_table)
+ + (new_order * sizeof(struct rcu_level *)));
+ assert(new_size < old_size);
memcpy(&new_t->tbl, &old_t->tbl,
- old_order * sizeof(struct _rcu_ht_node *));
- init_table(ht, new_t, old_order, new_order - old_order);
+ new_order * sizeof(struct rcu_level *));
+ new_t->size = !new_order ? 1 : (1UL << (new_order - 1));
+ new_t->resize_target = new_t->size;
+ new_t->resize_initiated = 0;
+
/* Changing table and size atomically wrt lookups */
rcu_assign_pointer(ht->t, new_t);
- ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
+
+ /*
+ * We need to wait for all reader threads to reach Q.S. (and
+ * thus use the new table for lookups) before we can start
+ * releasing the old dummy nodes.
+ */
+ ht->cds_lfht_synchronize_rcu();
+
+ /* Unlink and remove all now-unused dummy node pointers. */
+ fini_table(ht, old_t, new_order, old_order - new_order);
+ ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
+}
+
+
+/* called with resize mutex held */
+static
+void _do_cds_lfht_resize(struct cds_lfht *ht)
+{
+ unsigned long new_size, old_size;
+ struct rcu_table *old_t;
+
+ old_t = ht->t;
+ old_size = old_t->size;
+ new_size = CMM_LOAD_SHARED(old_t->resize_target);
+ if (old_size < new_size)
+ _do_cds_lfht_grow(ht, old_t, old_size, new_size);
+ else if (old_size > new_size)
+ _do_cds_lfht_shrink(ht, old_t, old_size, new_size);
+ else
+ CMM_STORE_SHARED(old_t->resize_initiated, 0);
}
static
t->size << growth_order);
}
-void ht_resize(struct rcu_ht *ht, int growth)
+static
+unsigned long resize_target_update_count(struct rcu_table *t,
+ unsigned long count)
+{
+ count = max(count, 1);
+ return uatomic_set(&t->resize_target, count);
+}
+
+void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
struct rcu_table *t = rcu_dereference(ht->t);
unsigned long target_size;
- target_size = resize_target_update(t, growth);
- if (t->size < target_size) {
- CMM_STORE_SHARED(t->resize_initiated, 1);
- pthread_mutex_lock(&ht->resize_mutex);
- _do_ht_resize(ht);
- pthread_mutex_unlock(&ht->resize_mutex);
- }
+ target_size = resize_target_update_count(t, new_size);
+ CMM_STORE_SHARED(t->resize_initiated, 1);
+ pthread_mutex_lock(&ht->resize_mutex);
+ _do_cds_lfht_resize(ht);
+ pthread_mutex_unlock(&ht->resize_mutex);
}
static
{
struct rcu_resize_work *work =
caa_container_of(head, struct rcu_resize_work, head);
- struct rcu_ht *ht = work->ht;
+ struct cds_lfht *ht = work->ht;
pthread_mutex_lock(&ht->resize_mutex);
- _do_ht_resize(ht);
+ _do_cds_lfht_resize(ht);
pthread_mutex_unlock(&ht->resize_mutex);
free(work);
cmm_smp_mb(); /* finish resize before decrement */
}
static
-void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
+void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
{
struct rcu_resize_work *work;
unsigned long target_size;
cmm_smp_mb(); /* increment resize count before calling it */
work = malloc(sizeof(*work));
work->ht = ht;
- ht->ht_call_rcu(&work->head, do_resize_cb);
+ ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
CMM_STORE_SHARED(t->resize_initiated, 1);
}
}
+
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+ unsigned long count)
+{
+ struct rcu_resize_work *work;
+ unsigned long target_size;
+
+ target_size = resize_target_update_count(t, count);
+ if (!CMM_LOAD_SHARED(t->resize_initiated)) {
+ uatomic_inc(&ht->in_progress_resize);
+ cmm_smp_mb(); /* increment resize count before calling it */
+ work = malloc(sizeof(*work));
+ work->ht = ht;
+ ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
+ CMM_STORE_SHARED(t->resize_initiated, 1);
+ }
+}
+
+#endif