#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
-#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <stdio.h>
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct rcu_level {
- struct rcu_head head;
+ /* Note: manually update allocation length when adding a field */
struct _cds_lfht_node nodes[0];
};
};
struct partition_resize_work {
- struct rcu_head head;
+ pthread_t thread_id;
struct cds_lfht *ht;
unsigned long i, start, len;
void (*fct)(struct cds_lfht *ht, unsigned long i,
#endif
}
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
int get_count_order_u32(uint32_t x)
{
- int order;
- order = fls_u32(x) - 1;
- if (x & (x - 1))
- order++;
- return order;
+ if (!x)
+ return -1;
+ return fls_u32(x - 1);
}
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
int get_count_order_ulong(unsigned long x)
{
- int order;
- order = fls_ulong(x) - 1;
- if (x & (x - 1))
- order++;
- return order;
+ if (!x)
+ return -1;
+ return fls_ulong(x - 1);
}
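+/*
+ * Worked example (illustrative, assuming the usual fls() semantics
+ * where fls(1) == 1 and fls(0) == 0; the u32 variant above behaves
+ * the same way):
+ * get_count_order_ulong(1) == fls_ulong(0) == 0, and 1 <= (1UL << 0);
+ * get_count_order_ulong(8) == fls_ulong(7) == 3, and 8 <= (1UL << 3);
+ * get_count_order_ulong(9) == fls_ulong(8) == 4, and 9 <= (1UL << 4).
+ * The old code computed fls(x) - 1 and bumped the result when x was
+ * not a power of two; subtracting 1 from x first folds both cases
+ * into a single fls() call.
+ */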
#ifdef POISON_FREE
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
-static const long nr_cpus_mask = -1;
+static const long nr_cpus_mask = -2;
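+/*
+ * The -2 above is a sentinel: without sched_getcpu()/sysconf() the
+ * number of CPUs is unknown, and the partition resize helper below
+ * falls back to a single thread whenever nr_cpus_mask <= 0.
+ */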
static
struct ht_items_count *alloc_per_cpu_items_count(void)
return v;
}
-static
-void cds_lfht_free_level(struct rcu_head *head)
-{
- struct rcu_level *l =
- caa_container_of(head, struct rcu_level, head);
- poison_free(l);
-}
-
/*
* Remove all logically deleted nodes from a bucket up to a certain node key.
*/
struct partition_resize_work *work;
int thread, ret;
unsigned long nr_threads;
- pthread_t *thread_id;
/*
* Note: nr_cpus_mask + 1 is always a power of 2.
* We spawn just the number of threads we need to satisfy the minimum
* partition size, up to the number of CPUs in the system.
*/
- nr_threads = min(nr_cpus_mask + 1,
- len >> MIN_PARTITION_PER_THREAD_ORDER);
+ if (nr_cpus_mask > 0) {
+ nr_threads = min(nr_cpus_mask + 1,
+ len >> MIN_PARTITION_PER_THREAD_ORDER);
+ } else {
+ nr_threads = 1;
+ }
partition_len = len >> get_count_order_ulong(nr_threads);
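+ /*
+ * Example: len == 4096 with nr_threads == 8 gives
+ * partition_len == 512, so each thread handles a contiguous range
+ * [start, start + 512) of bucket indexes.
+ */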
work = calloc(nr_threads, sizeof(*work));
- thread_id = calloc(nr_threads, sizeof(*thread_id));
assert(work);
for (thread = 0; thread < nr_threads; thread++) {
work[thread].ht = ht;
work[thread].len = partition_len;
work[thread].start = thread * partition_len;
work[thread].fct = fct;
- ret = pthread_create(&thread_id[thread], ht->resize_attr,
+ ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
partition_resize_thread, &work[thread]);
assert(!ret);
}
for (thread = 0; thread < nr_threads; thread++) {
- ret = pthread_join(thread_id[thread], NULL);
+ ret = pthread_join(work[thread].thread_id, NULL);
assert(!ret);
}
free(work);
- free(thread_id);
}
/*
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
(void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
new_node, ADD_DEFAULT, 1);
- if (CMM_LOAD_SHARED(ht->in_progress_destroy))
- break;
}
ht->cds_lfht_rcu_read_unlock();
}
if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
break;
- ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
- + (len * sizeof(struct _cds_lfht_node)));
+ ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
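+ /*
+ * With rcu_head gone from struct rcu_level, a level is just its
+ * flexible array of nodes, so the allocation above is exactly len
+ * nodes (see the note in the struct definition).
+ */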
assert(ht->t.tbl[i]);
/*
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
(void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
fini_node, 1);
- if (CMM_LOAD_SHARED(ht->in_progress_destroy))
- break;
}
ht->cds_lfht_rcu_read_unlock();
}
unsigned long first_order, unsigned long len_order)
{
long i, end_order;
+ void *free_by_rcu = NULL;
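+ /*
+ * free_by_rcu carries the level unlinked during the previous loop
+ * iteration; it can only be freed once a grace period guarantees
+ * that no reader still holds a reference to it.
+ */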
dbg_printf("fini table: first_order %lu end_order %lu\n",
first_order, first_order + len_order);
* return a logically removed node as insert position.
*/
ht->cds_lfht_synchronize_rcu();
+ if (free_by_rcu)
+ free(free_by_rcu);
/*
* Set "removed" flag in dummy nodes about to be removed.
*/
remove_table(ht, i, len);
- ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level);
+ free_by_rcu = ht->t.tbl[i];
dbg_printf("fini new size: %lu\n", 1UL << i);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
+
+ if (free_by_rcu) {
+ ht->cds_lfht_synchronize_rcu();
+ free(free_by_rcu);
+ }
}
struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
iter->next = next;
}
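+/*
+ * Note on the rename below: the former cds_lfht_next() only walked
+ * nodes sharing the current key, hence the new name
+ * cds_lfht_next_duplicate(); the new cds_lfht_next() advances to the
+ * next live node in the whole table, whatever its key.
+ */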
-void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
struct cds_lfht_node *node, *next;
unsigned long reverse_hash;
iter->next = next;
}
+void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+ struct cds_lfht_node *node, *next;
+
+ node = clear_flag(iter->next);
+ for (;;) {
+ if (unlikely(is_end(node))) {
+ node = next = NULL;
+ break;
+ }
+ next = rcu_dereference(node->p.next);
+ if (likely(!is_removed(next))
+ && !is_dummy(next)) {
+ break;
+ }
+ node = clear_flag(next);
+ }
+ assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ iter->node = node;
+ iter->next = next;
+}
+
+void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+ struct _cds_lfht_node *lookup;
+
+ /*
+ * Get next after first dummy node. The first dummy node is the
+ * first node of the linked list.
+ */
+ lookup = &ht->t.tbl[0]->nodes[0];
+ iter->next = lookup->next;
+ cds_lfht_next(ht, iter);
+}
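+/*
+ * Minimal traversal sketch (assumes the caller is a registered RCU
+ * reader holding the read lock, and uses the public
+ * cds_lfht_iter_get_node() accessor):
+ *
+ *   struct cds_lfht_iter iter;
+ *   struct cds_lfht_node *node;
+ *
+ *   cds_lfht_first(ht, &iter);
+ *   while ((node = cds_lfht_iter_get_node(&iter)) != NULL) {
+ *           ... use node ...
+ *           cds_lfht_next(ht, &iter);
+ *   }
+ */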
+
void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
unsigned long hash, size;
int ret;
/* Wait for in-flight resize operations to complete */
- CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+ _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+ cmm_smp_mb(); /* Store destroy before load resize */
while (uatomic_read(&ht->in_progress_resize))
poll(NULL, 0, 100); /* wait for 100ms */
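+ /*
+ * The store/mb/poll sequence above pairs with the
+ * uatomic_inc()/cmm_smp_mb()/CMM_LOAD_SHARED() sequence in the lazy
+ * resize path: either a concurrent resize observes
+ * in_progress_destroy and backs out, or this loop observes its
+ * in_progress_resize increment and waits for it to complete.
+ */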
ret = cds_lfht_delete_dummy(ht);
old_order = get_count_order_ulong(old_size) + 1;
new_order = get_count_order_ulong(new_size) + 1;
- printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
- old_size, old_order, new_size, new_order);
+ dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+ old_size, old_order, new_size, new_order);
assert(new_size > old_size);
init_table(ht, old_order, new_order - old_order);
}
new_size = max(new_size, MIN_TABLE_SIZE);
old_order = get_count_order_ulong(old_size) + 1;
new_order = get_count_order_ulong(new_size) + 1;
- printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
- old_size, old_order, new_size, new_order);
+ dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+ old_size, old_order, new_size, new_order);
assert(new_size < old_size);
/* Remove and unlink all dummy nodes to remove. */
* Resize table, re-do if the target size has changed under us.
*/
do {
+ assert(uatomic_read(&ht->in_progress_resize));
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
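+ /*
+ * A destroy may have begun after this resize was queued; bail out
+ * rather than resize a table that is being torn down.
+ */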
ht->t.resize_initiated = 1;
old_size = ht->t.size;
new_size = CMM_LOAD_SHARED(ht->t.resize_target);
cmm_smp_mb();
if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before calling it */
+ cmm_smp_mb(); /* increment resize count before load destroy */
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+ uatomic_dec(&ht->in_progress_resize);
+ return;
+ }
work = malloc(sizeof(*work));
work->ht = ht;
ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
cmm_smp_mb();
if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before calling it */
+ cmm_smp_mb(); /* increment resize count before load destroy */
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+ uatomic_dec(&ht->in_progress_resize);
+ return;
+ }
work = malloc(sizeof(*work));
work->ht = ht;
ht->cds_lfht_call_rcu(&work->head, do_resize_cb);