} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct rcu_level {
- struct rcu_head head;
+ /* Note: manually update allocation length when adding a field */
struct _cds_lfht_node nodes[0];
};
};
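With the rcu_head gone, struct rcu_level is left with only its zero-length node array, so the struct itself contributes no bytes to an allocation; the new comment warns maintainers because the calloc() site later in this patch hard-codes that assumption. A minimal sketch of the effect, using stand-in node/level types (not the real ones), relying on the GCC zero-length-array extension that rculfhash already uses:

#include <stdio.h>
#include <stdlib.h>

struct node { void *next; unsigned long key; };	/* stand-in */

struct level {
	/* Note: manually update allocation length when adding a field */
	struct node nodes[0];	/* GCC zero-length array */
};

int main(void)
{
	unsigned long len = 16;
	struct level *l;

	/* The header is empty, so the node array is the whole allocation. */
	printf("sizeof(struct level) = %zu\n", sizeof(struct level)); /* 0 */
	l = calloc(1, len * sizeof(struct node));
	if (!l)
		return 1;
	l->nodes[3].key = 42;
	free(l);
	return 0;
}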
struct partition_resize_work {
- struct rcu_head head;
+ pthread_t thread_id;
struct cds_lfht *ht;
unsigned long i, start, len;
void (*fct)(struct cds_lfht *ht, unsigned long i,
unsigned long start, unsigned long len);
};
#endif
}
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
int get_count_order_u32(uint32_t x)
{
- int order;
+ if (!x)
+ return -1;
- order = fls_u32(x) - 1;
- if (x & (x - 1))
- order++;
- return order;
+ return fls_u32(x - 1);
}
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
int get_count_order_ulong(unsigned long x)
{
- int order;
+ if (!x)
+ return -1;
- order = fls_ulong(x) - 1;
- if (x & (x - 1))
- order++;
- return order;
+ return fls_ulong(x - 1);
}
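Both rewrites compute the same rounded-up order as the removed fls/round-up sequence, with the zero case now explicit. A quick standalone check of that equivalence; fls_u32() below is a portable stand-in built on __builtin_clz (an assumption for the sketch; urcu ships its own implementations):

#include <assert.h>
#include <stdint.h>

/* Stand-in fls: 0 for 0, else 1 + index of the highest set bit. */
static int fls_u32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int old_order(uint32_t x)
{
	int order = fls_u32(x) - 1;

	if (x & (x - 1))	/* not a power of two: round up */
		order++;
	return order;
}

static int new_order(uint32_t x)
{
	if (!x)
		return -1;
	return fls_u32(x - 1);
}

int main(void)
{
	uint32_t x;

	for (x = 1; x < (1U << 20); x++)
		assert(old_order(x) == new_order(x));
	assert(new_order(1) == 0);	/* 1 <= 1 << 0 */
	assert(new_order(5) == 3);	/* 5 <= 1 << 3 */
	assert(new_order(8) == 3);	/* exact power of two */
	assert(new_order(0) == -1);	/* explicit, was implicit before */
	return 0;
}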
#ifdef POISON_FREE
}
static
-void cds_lfht_free_level(struct rcu_head *head)
+struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
+ unsigned long hash)
{
- struct rcu_level *l =
- caa_container_of(head, struct rcu_level, head);
- poison_free(l);
+ unsigned long index, order;
+
+ assert(size > 0);
+ index = hash & (size - 1);
+ order = get_count_order_ulong(index + 1);
+
+ dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
+ hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
+
+ return &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
}
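For reference, the arithmetic the new helper factors out of four call sites: bucket index 0 lives in level 0, and bucket index i >= 1 lives in level order(i + 1) at offset i - 2^(order - 1), which is exactly what the `index & ...` mask computes. A toy walk of a size-8 table; count_order() is a naive stand-in for get_count_order_ulong():

#include <stdio.h>

static unsigned long count_order(unsigned long x)
{
	unsigned long order = 0;

	while ((1UL << order) < x)	/* minimum order with x <= 2^order */
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 8, index;

	for (index = 0; index < size; index++) {
		unsigned long order = count_order(index + 1);
		unsigned long aridx =
			index & (!order ? 0 : ((1UL << (order - 1)) - 1));

		printf("bucket %lu -> level %lu slot %lu\n",
			index, order, aridx);
	}
	return 0;
}

Level 0 holds one bucket and level k holds 2^(k-1), so each resize step appends a level without relocating any existing bucket.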
/*
struct cds_lfht_node *dummy, *old_next;
struct _cds_lfht_node *lookup;
int flagged = 0;
- unsigned long hash, index, order;
if (!old_node) /* Return -ENOENT if asked to replace NULL node */
goto end;
* lookup for the node, and remove it (along with any other
* logically removed node) if found.
*/
- hash = bit_reverse_ulong(old_node->p.reverse_hash);
- assert(size > 0);
- index = hash & (size - 1);
- order = get_count_order_ulong(index + 1);
- lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+ lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
dummy = (struct cds_lfht_node *) lookup;
_cds_lfht_gc_bucket(dummy, new_node);
end:
struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
*dummy_node, *return_node;
struct _cds_lfht_node *lookup;
- unsigned long hash, index, order;
assert(!is_dummy(node));
assert(!is_removed(node));
node->p.next = flag_dummy(get_end());
return node; /* Initial first add (head) */
}
- hash = bit_reverse_ulong(node->p.reverse_hash);
+ lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
for (;;) {
uint32_t chain_len = 0;
* iter_prev points to the non-removed node prior to the
* insert location.
*/
- index = hash & (size - 1);
- order = get_count_order_ulong(index + 1);
- lookup = &ht->t.tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
iter_prev = (struct cds_lfht_node *) lookup;
/* We can always skip the dummy node initially */
iter = rcu_dereference(iter_prev->p.next);
}
gc_end:
/* Garbage collect logically removed nodes in the bucket */
- index = hash & (size - 1);
- order = get_count_order_ulong(index + 1);
- lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
dummy_node = (struct cds_lfht_node *) lookup;
_cds_lfht_gc_bucket(dummy_node, node);
end:
struct cds_lfht_node *dummy, *next, *old;
struct _cds_lfht_node *lookup;
int flagged = 0;
- unsigned long hash, index, order;
if (!node) /* Return -ENOENT if asked to delete NULL node */
goto end;
* the node, and remove it (along with any other logically removed node)
* if found.
*/
- hash = bit_reverse_ulong(node->p.reverse_hash);
- assert(size > 0);
- index = hash & (size - 1);
- order = get_count_order_ulong(index + 1);
- lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+ lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
dummy = (struct cds_lfht_node *) lookup;
_cds_lfht_gc_bucket(dummy, node);
end:
struct partition_resize_work *work;
int thread, ret;
unsigned long nr_threads;
- pthread_t *thread_id;
/*
* Note: nr_cpus_mask + 1 is always a power of 2.
}
partition_len = len >> get_count_order_ulong(nr_threads);
work = calloc(nr_threads, sizeof(*work));
- thread_id = calloc(nr_threads, sizeof(*thread_id));
assert(work);
for (thread = 0; thread < nr_threads; thread++) {
work[thread].ht = ht;
work[thread].len = partition_len;
work[thread].start = thread * partition_len;
work[thread].fct = fct;
- ret = pthread_create(&thread_id[thread], ht->resize_attr,
+ ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
partition_resize_thread, &work[thread]);
assert(!ret);
}
for (thread = 0; thread < nr_threads; thread++) {
- ret = pthread_join(thread_id[thread], NULL);
+ ret = pthread_join(work[thread].thread_id, NULL);
assert(!ret);
}
free(work);
- free(thread_id);
}
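Folding the pthread_t into the work descriptor removes one allocation and one free per resize and keeps all per-thread state in a single array. A self-contained sketch of the same pattern, with illustrative names rather than the real resize callbacks:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	pthread_t thread_id;	/* previously a separate thread_id[] array */
	unsigned long start, len;
};

static void *worker(void *arg)
{
	struct work *w = arg;

	printf("partition [%lu, %lu)\n", w->start, w->start + w->len);
	return NULL;
}

int main(void)
{
	unsigned long nr_threads = 4, partition_len = 256, t;
	struct work *work = calloc(nr_threads, sizeof(*work));
	int ret;

	assert(work);
	for (t = 0; t < nr_threads; t++) {
		work[t].start = t * partition_len;
		work[t].len = partition_len;
		ret = pthread_create(&work[t].thread_id, NULL,
				worker, &work[t]);
		assert(!ret);
	}
	for (t = 0; t < nr_threads; t++) {
		ret = pthread_join(work[t].thread_id, NULL);
		assert(!ret);
	}
	free(work);	/* one allocation, one free */
	return 0;
}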
/*
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
(void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
new_node, ADD_DEFAULT, 1);
- if (CMM_LOAD_SHARED(ht->in_progress_destroy))
- break;
}
ht->cds_lfht_rcu_read_unlock();
}
if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
break;
- ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
- + (len * sizeof(struct _cds_lfht_node)));
+ ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
assert(ht->t.tbl[i]);
/*
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
(void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
fini_node, 1);
- if (CMM_LOAD_SHARED(ht->in_progress_destroy))
- break;
}
ht->cds_lfht_rcu_read_unlock();
}
unsigned long first_order, unsigned long len_order)
{
long i, end_order;
+ void *free_by_rcu = NULL;
dbg_printf("fini table: first_order %lu end_order %lu\n",
first_order, first_order + len_order);
* return a logically removed node as insert position.
*/
ht->cds_lfht_synchronize_rcu();
+ if (free_by_rcu)
+ free(free_by_rcu);
/*
* Set "removed" flag in dummy nodes about to be removed.
*/
remove_table(ht, i, len);
- ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level);
+ free_by_rcu = ht->t.tbl[i];
dbg_printf("fini new size: %lu\n", 1UL << i);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
+
+ if (free_by_rcu) {
+ ht->cds_lfht_synchronize_rcu();
+ free(free_by_rcu);
+ }
}
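Instead of one call_rcu() callback per level, the shrink loop now pipelines the frees: the grace period it already waits for at the top of each iteration also retires the level detached on the previous pass, and one final grace period flushes the last pending level. The shape of the pattern, with a no-op stand-in for the grace period (a real synchronize_rcu() waits out all pre-existing readers):

#include <stdlib.h>

#define NR_LEVELS 4
static void *level[NR_LEVELS];

static void synchronize_rcu_standin(void)
{
	/* stand-in: real code waits for a full RCU grace period here */
}

int main(void)
{
	void *free_by_rcu = NULL;
	long i;

	for (i = 0; i < NR_LEVELS; i++)
		level[i] = malloc(64);

	for (i = NR_LEVELS - 1; i >= 1; i--) {
		/*
		 * One grace period per iteration does double duty: it
		 * covers this iteration's unlinking and makes the level
		 * detached on the previous pass safe to free.
		 */
		synchronize_rcu_standin();
		free(free_by_rcu);	/* free(NULL) is a no-op */
		free_by_rcu = level[i];	/* detach level i, free it next pass */
		level[i] = NULL;
	}
	if (free_by_rcu) {
		synchronize_rcu_standin();	/* last level still pending */
		free(free_by_rcu);
	}
	free(level[0]);
	return 0;
}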
struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
{
struct cds_lfht_node *node, *next, *dummy_node;
struct _cds_lfht_node *lookup;
- unsigned long hash, reverse_hash, index, order, size;
+ unsigned long hash, reverse_hash, size;
hash = ht->hash_fct(key, key_len, ht->hash_seed);
reverse_hash = bit_reverse_ulong(hash);
size = rcu_dereference(ht->t.size);
- index = hash & (size - 1);
- order = get_count_order_ulong(index + 1);
- lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
- dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
- hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
+ lookup = lookup_bucket(ht, size, hash);
dummy_node = (struct cds_lfht_node *) lookup;
/* We can always skip the dummy node initially */
node = rcu_dereference(dummy_node->p.next);
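All of these lookups work because nodes are kept in one list ordered by bit-reversed hash. A toy demonstration (8-bit hashes and a loop-based reverse, not the library's bit_reverse_ulong) of why that ordering keeps each bucket's nodes in one contiguous run:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint8_t bit_reverse_u8(uint8_t v)
{
	uint8_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		r |= ((v >> i) & 1) << (7 - i);
	return r;
}

static int cmp_reversed(const void *a, const void *b)
{
	uint8_t ra = bit_reverse_u8(*(const uint8_t *) a);
	uint8_t rb = bit_reverse_u8(*(const uint8_t *) b);

	return (ra > rb) - (ra < rb);
}

int main(void)
{
	uint8_t hashes[] = { 5, 12, 3, 8, 10, 7, 2, 13 };
	size_t n = sizeof(hashes) / sizeof(hashes[0]), i;

	qsort(hashes, n, sizeof(hashes[0]), cmp_reversed);
	for (i = 0; i < n; i++)
		printf("hash %2d -> bucket %d of 4, bucket %d of 8\n",
			hashes[i], hashes[i] & 3, hashes[i] & 7);
	return 0;
}

Doubling the table splits each run in place (bucket 0 of 4 becomes buckets 0 and 4 of 8), which is why a resize only adds dummy nodes and never moves existing ones.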
int ret;
/* Wait for in-flight resize operations to complete */
- CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+ _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+ cmm_smp_mb(); /* Store destroy before load resize */
while (uatomic_read(&ht->in_progress_resize))
poll(NULL, 0, 100); /* wait for 100ms */
ret = cds_lfht_delete_dummy(ht);
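The store/barrier on the destroy side pairs with the increment/barrier added to the resize-lazy paths further down: whatever the interleaving, either destroy observes a non-zero in_progress_resize and waits, or the resizer observes in_progress_destroy and backs out. A C11-atomics rendering of just that Dekker-style handshake (names mirror the patch; the bodies are stand-ins, not the urcu primitives):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long in_progress_resize;
static atomic_int in_progress_destroy;

static bool try_queue_resize(void)
{
	atomic_fetch_add_explicit(&in_progress_resize, 1,
			memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* inc before load destroy */
	if (atomic_load_explicit(&in_progress_destroy,
			memory_order_relaxed)) {
		atomic_fetch_sub_explicit(&in_progress_resize, 1,
				memory_order_relaxed);
		return false;	/* destroy in progress: do not queue work */
	}
	/* ... queue resize work; the worker decrements when done ... */
	return true;
}

static void begin_destroy(void)
{
	atomic_store_explicit(&in_progress_destroy, 1,
			memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* store destroy before load resize */
	while (atomic_load_explicit(&in_progress_resize,
			memory_order_relaxed)) {
		/* wait, e.g. poll(NULL, 0, 100) in the real code */
	}
}

int main(void)
{
	if (try_queue_resize())	/* pretend the queued resize completed */
		atomic_fetch_sub_explicit(&in_progress_resize, 1,
				memory_order_relaxed);
	begin_destroy();
	return 0;
}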
old_order = get_count_order_ulong(old_size) + 1;
new_order = get_count_order_ulong(new_size) + 1;
- printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
- old_size, old_order, new_size, new_order);
+ dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+ old_size, old_order, new_size, new_order);
assert(new_size > old_size);
init_table(ht, old_order, new_order - old_order);
}
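The printf calls here were debugging leftovers; dbg_printf() costs nothing unless the tree is built with debugging enabled. A sketch of that kind of compile-time gate, using GNU named variadic macros in the codebase's style (the exact macro in rculfhash.c is assumed, not quoted):

#include <stdio.h>

#ifdef DEBUG
#define dbg_printf(fmt, args...)	printf("[debug rculfhash] " fmt, ## args)
#else
#define dbg_printf(fmt, args...)	/* expands to nothing */
#endif

int main(void)
{
	/* No output (and no stdout cost) unless compiled with -DDEBUG. */
	dbg_printf("resize from %lu to %lu buckets\n", 8UL, 16UL);
	return 0;
}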
new_size = max(new_size, MIN_TABLE_SIZE);
old_order = get_count_order_ulong(old_size) + 1;
new_order = get_count_order_ulong(new_size) + 1;
- printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
- old_size, old_order, new_size, new_order);
+ dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+ old_size, old_order, new_size, new_order);
assert(new_size < old_size);
/* Remove and unlink all dummy nodes to remove. */
* Resize table, re-do if the target size has changed under us.
*/
do {
+ assert(uatomic_read(&ht->in_progress_resize));
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
ht->t.resize_initiated = 1;
old_size = ht->t.size;
new_size = CMM_LOAD_SHARED(ht->t.resize_target);
cmm_smp_mb();
if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before calling it */
+ cmm_smp_mb(); /* increment resize count before load destroy */
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+ uatomic_dec(&ht->in_progress_resize);
+ return;
+ }
work = malloc(sizeof(*work));
work->ht = ht;
ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
cmm_smp_mb();
if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before calling it */
+ cmm_smp_mb(); /* increment resize count before load destroy */
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+ uatomic_dec(&ht->in_progress_resize);
+ return;
+ }
work = malloc(sizeof(*work));
work->ht = ht;
ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
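For completeness, the shape of the deferred dispatch both lazy paths end on: the work item embeds its rcu_head, and the callback recovers the enclosing item with container_of once it runs after a grace period. A stand-in sketch (call_rcu_standin invokes the callback immediately; the real call_rcu defers it, and the real work item carries the ht pointer):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct rcu_head {
	void (*func)(struct rcu_head *head);
};

struct resize_work {
	struct rcu_head head;		/* embedded so the callback can find us */
	unsigned long target_size;	/* stand-in for the ht pointer */
};

#define caa_container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

static void do_resize_cb(struct rcu_head *head)
{
	struct resize_work *work =
		caa_container_of(head, struct resize_work, head);

	printf("resizing to %lu buckets\n", work->target_size);
	free(work);	/* the callback owns the work item */
}

/* Stand-in: the real call_rcu() runs func(head) after a grace period. */
static void call_rcu_standin(struct rcu_head *head,
		void (*func)(struct rcu_head *head))
{
	head->func = func;
	head->func(head);
}

int main(void)
{
	struct resize_work *work = malloc(sizeof(*work));

	if (!work)
		return 1;
	work->target_size = 4096;
	call_rcu_standin(&work->head, do_resize_cb);
	return 0;
}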