struct cds_lfht {
struct rcu_table t;
- cds_lfht_hash_fct hash_fct;
- cds_lfht_compare_fct compare_fct;
unsigned long min_alloc_order;
unsigned long min_alloc_size;
- unsigned long hash_seed;
int flags;
/*
 * We need to put the work threads offline (QSBR) when taking this
 * mutex, because we use synchronize_rcu within this mutex critical
 * section, which waits on read-side critical sections, and could
 * therefore cause grace-period deadlock if we hold off RCU G.P.
 * completion.
 */
pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */

static
void _cds_lfht_add(struct cds_lfht *ht,
+ cds_lfht_match_fct match,
unsigned long size,
struct cds_lfht_node *node,
struct cds_lfht_iter *unique_ret,
* (including observing nodes one by one
* by forward iterations)
*/
- cds_lfht_next_duplicate(ht, &d_iter);
+ cds_lfht_next_duplicate(ht, match, &d_iter);
if (!d_iter.node)
goto insert;
i, j, (1UL << (i - 1)) + j);
new_node->p.reverse_hash =
bit_reverse_ulong((1UL << (i - 1)) + j);
- _cds_lfht_add(ht, 1UL << (i - 1),
+ _cds_lfht_add(ht, NULL, 1UL << (i - 1),
new_node, NULL, 1);
}
ht->cds_lfht_rcu_read_unlock();
}
}
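/*
 * Caller-side sketch of the new match API (hypothetical example): since
 * the table no longer stores hash_fct/compare_fct/hash_seed, each
 * lookup/duplicate/unique-add call passes a cds_lfht_match_fct. Its shape
 * is inferred from the match(node, key) call sites in this file: it gets
 * the candidate node and the caller's key, and returns nonzero on a match.
 * Internal dummy (bucket) nodes are added with match == NULL above, since
 * no key comparison is ever performed on them.
 */
struct mynode {
	int value;			/* hypothetical key/payload */
	struct cds_lfht_node node;	/* chaining in the hash table */
	struct rcu_head rcu_head;	/* for call_rcu()-based reclaim */
};

static
int mymatch(struct cds_lfht_node *ht_node, const void *_key)
{
	const int *key = _key;
	struct mynode *node = caa_container_of(ht_node, struct mynode, node);

	return node->value == *key;
}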
-struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
- cds_lfht_compare_fct compare_fct,
- unsigned long hash_seed,
- unsigned long init_size,
+struct cds_lfht *_cds_lfht_new(unsigned long init_size,
unsigned long min_alloc_size,
int flags,
void (*cds_lfht_call_rcu)(struct rcu_head *head,
ht = calloc(1, sizeof(struct cds_lfht));
assert(ht);
ht->flags = flags;
- ht->hash_fct = hash_fct;
- ht->compare_fct = compare_fct;
- ht->hash_seed = hash_seed;
ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
return ht;
}
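/*
 * Hypothetical creation sketch: with hashing moved to the callers,
 * _cds_lfht_new() takes neither hash function nor seed. Assuming the
 * public cds_lfht_new() wrapper still forwards here with the RCU flavor's
 * call_rcu/synchronize_rcu/read-lock functions (and a trailing thread
 * attribute argument) filled in, creating a table reduces to:
 */
static
struct cds_lfht *create_table(void)
{
	/* 1024 initial buckets, never allocate below 1024, auto-resize. */
	return cds_lfht_new(1024, 1024, CDS_LFHT_AUTO_RESIZE, NULL);
}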
-void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
- struct cds_lfht_iter *iter)
+void cds_lfht_lookup(struct cds_lfht *ht, cds_lfht_match_fct match,
+ unsigned long hash, void *key, struct cds_lfht_iter *iter)
{
struct cds_lfht_node *node, *next, *dummy_node;
struct _cds_lfht_node *lookup;
- unsigned long hash, reverse_hash, size;
+ unsigned long reverse_hash, size;
- hash = ht->hash_fct(key, key_len, ht->hash_seed);
reverse_hash = bit_reverse_ulong(hash);
size = rcu_dereference(ht->t.size);
if (caa_likely(!is_removed(next))
&& !is_dummy(next)
&& node->p.reverse_hash == reverse_hash
- && caa_likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+ && caa_likely(match(node, key))) {
break;
}
node = clear_flag(next);
iter->next = next;
}
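/*
 * Hypothetical lookup sketch: the caller now hashes the key itself and
 * passes the hash plus a match function. my_hash_fct() and hash_seed are
 * stand-ins for whatever hash the application uses; mymatch and struct
 * mynode come from the sketch above. rcu_read_lock() must be held across
 * the lookup and any use of the returned node.
 */
static unsigned long hash_seed;	/* hypothetical application-wide seed */
unsigned long my_hash_fct(const void *key, size_t length, unsigned long seed);

static
struct mynode *lookup_value(struct cds_lfht *ht, int *key)
{
	struct cds_lfht_iter iter;

	cds_lfht_lookup(ht, mymatch, my_hash_fct(key, sizeof(*key), hash_seed),
			key, &iter);
	if (!iter.node)
		return NULL;
	return caa_container_of(iter.node, struct mynode, node);
}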
-void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
+ struct cds_lfht_iter *iter)
{
struct cds_lfht_node *node, *next;
unsigned long reverse_hash;
void *key;
- size_t key_len;
node = iter->node;
reverse_hash = node->p.reverse_hash;
key = node->key;
- key_len = node->key_len;
next = iter->next;
node = clear_flag(next);
next = rcu_dereference(node->p.next);
if (caa_likely(!is_removed(next))
&& !is_dummy(next)
- && caa_likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+ && caa_likely(match(node, key))) {
break;
}
node = clear_flag(next);
cds_lfht_next(ht, iter);
}
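/*
 * Hypothetical duplicate-iteration sketch, reusing the helpers above:
 * cds_lfht_next_duplicate() also needs the match function now, as the
 * table no longer remembers one. Caller holds rcu_read_lock().
 */
static
unsigned long count_duplicates(struct cds_lfht *ht, int *key)
{
	struct cds_lfht_iter iter;
	unsigned long count = 0;

	cds_lfht_lookup(ht, mymatch, my_hash_fct(key, sizeof(*key), hash_seed),
			key, &iter);
	while (iter.node) {
		count++;
		cds_lfht_next_duplicate(ht, mymatch, &iter);
	}
	return count;
}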
-void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
+void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
+ struct cds_lfht_node *node)
{
- unsigned long hash, size;
+ unsigned long size;
- hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
-
size = rcu_dereference(ht->t.size);
- _cds_lfht_add(ht, size, node, NULL, 0);
+ _cds_lfht_add(ht, NULL, size, node, NULL, 0);
ht_count_add(ht, size, hash);
}
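/*
 * Hypothetical add sketch: a plain add only needs the hash. No match
 * function is passed, since duplicates are allowed and no key is compared.
 */
static
void insert_value(struct cds_lfht *ht, struct mynode *node)
{
	unsigned long hash = my_hash_fct(&node->value, sizeof(node->value),
					hash_seed);

	rcu_read_lock();
	cds_lfht_add(ht, hash, &node->node);
	rcu_read_unlock();
}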
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
+ cds_lfht_match_fct match,
+ unsigned long hash,
struct cds_lfht_node *node)
{
- unsigned long hash, size;
+ unsigned long size;
struct cds_lfht_iter iter;
- hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
-
size = rcu_dereference(ht->t.size);
- _cds_lfht_add(ht, size, node, &iter, 0);
+ _cds_lfht_add(ht, match, size, node, &iter, 0);
if (iter.node == node)
ht_count_add(ht, size, hash);
return iter.node;
}
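/*
 * Hypothetical unique-add sketch: as the code above shows,
 * cds_lfht_add_unique() returns the node that ends up in the table, so
 * comparing the return value with the candidate tells the caller whether
 * the insert won or an identical key was already present.
 */
static
int insert_unique(struct cds_lfht *ht, struct mynode *node)
{
	struct cds_lfht_node *ret;
	unsigned long hash = my_hash_fct(&node->value, sizeof(node->value),
					hash_seed);

	rcu_read_lock();
	ret = cds_lfht_add_unique(ht, mymatch, hash, &node->node);
	rcu_read_unlock();
	return ret == &node->node;	/* 1: added, 0: key already present */
}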
struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
+ cds_lfht_match_fct match,
+ unsigned long hash,
struct cds_lfht_node *node)
{
- unsigned long hash, size;
+ unsigned long size;
struct cds_lfht_iter iter;
- hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
-
size = rcu_dereference(ht->t.size);
for (;;) {
- _cds_lfht_add(ht, size, node, &iter, 0);
+ _cds_lfht_add(ht, match, size, node, &iter, 0);
if (iter.node == node) {
ht_count_add(ht, size, hash);
return NULL;
}
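/*
 * Hypothetical add-replace sketch: the hunk above returns NULL when the
 * node was added without replacing anything; the remainder of the function
 * (not shown here) is expected to return the replaced node, which the
 * caller frees after a grace period. free_mynode_cb is hypothetical.
 */
static
void free_mynode_cb(struct rcu_head *head)
{
	free(caa_container_of(head, struct mynode, rcu_head));
}

static
void upsert_value(struct cds_lfht *ht, struct mynode *node)
{
	struct cds_lfht_node *old;
	unsigned long hash = my_hash_fct(&node->value, sizeof(node->value),
					hash_seed);

	rcu_read_lock();
	old = cds_lfht_add_replace(ht, mymatch, hash, &node->node);
	rcu_read_unlock();
	if (old)
		call_rcu(&caa_container_of(old, struct mynode, node)->rcu_head,
			free_mynode_cb);
}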
static
-void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
+void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
{
struct rcu_resize_work *work;
- unsigned long target_size = size << growth;
-
- if (resize_target_grow(ht, target_size) >= target_size)
- return;
/* Store resize_target before read resize_initiated */
cmm_smp_mb();
}
}
+static
+void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
+{
+ unsigned long target_size = size << growth;
+
+ if (resize_target_grow(ht, target_size) >= target_size)
+ return;
+
+ __cds_lfht_resize_lazy_launch(ht);
+}
+
+/*
+ * We favor grow operations over shrink. A shrink operation never occurs
+ * if a grow operation is queued for lazy execution. A grow operation
+ * cancels any pending shrink lazy execution.
+ */
static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
unsigned long count)
{
- struct rcu_resize_work *work;
-
if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
return;
- resize_target_update_count(ht, count);
- /* Store resize_target before read resize_initiated */
- cmm_smp_mb();
- if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
- uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before load destroy */
- if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
- uatomic_dec(&ht->in_progress_resize);
+ count = max(count, ht->min_alloc_size);
+ if (count == size)
+ return; /* Already the right size, no resize needed */
+ if (count > size) { /* lazy grow */
+ if (resize_target_grow(ht, count) >= count)
return;
+ } else { /* lazy shrink */
+ for (;;) {
+ unsigned long s;
+
+ s = uatomic_cmpxchg(&ht->t.resize_target, size, count);
+ if (s == size)
+ break; /* cmpxchg succeeded: shrink target installed */
+ if (s > size)
+ return; /* a grow is (or was just) in progress */
+ if (s <= count)
+ return; /* another thread already requested an equal or smaller size */
+ size = s;
}
- work = malloc(sizeof(*work));
- work->ht = ht;
- ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
- CMM_STORE_SHARED(ht->t.resize_initiated, 1);
}
+ __cds_lfht_resize_lazy_launch(ht);
}