+
/*
 * Walk the whole hash table and report node statistics:
 *   *count   - number of live (non-dummy, non-removed) nodes,
 *   *removed - number of logically removed nodes not yet unlinked.
 * Dummy (bucket) nodes are tallied separately for debug output only.
 * NOTE(review): uses rcu_dereference, so the caller presumably must be
 * within an RCU read-side critical section — confirm against callers.
 */
void cds_lfht_count_nodes(struct cds_lfht *ht,
		unsigned long *count,
		unsigned long *removed)
{
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long nr_dummy = 0;

	*count = 0;
	*removed = 0;

	/* Count non-dummy nodes in the table */
	/* Start from the first bucket: the chain threads every node. */
	lookup = &ht->t.tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			/* Dummy nodes are never marked removed on this path. */
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			(nr_dummy)++;
		/* Strip flag bits from the link to get the next node pointer. */
		node = clear_flag(next);
	} while (!is_end(node));
	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
}
+
/* called with resize mutex held */
static
void _do_cds_lfht_grow(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	old_order = get_count_order_ulong(old_size) + 1;
	new_order = get_count_order_ulong(new_size) + 1;
	/*
	 * Use dbg_printf rather than plain printf: a library must not
	 * spam stdout, and every other trace in this file goes through
	 * dbg_printf (compiled out unless debugging is enabled).
	 */
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size > old_size);
	/* Allocate and link the new bucket-table orders above old_order. */
	init_table(ht, old_order, new_order - old_order);
}
+
+/* called with resize mutex held */
+static
+void _do_cds_lfht_shrink(struct cds_lfht *ht,
+ unsigned long old_size, unsigned long new_size)
+{
+ unsigned long old_order, new_order;
+
+ new_size = max(new_size, MIN_TABLE_SIZE);
+ old_order = get_count_order_ulong(old_size) + 1;
+ new_order = get_count_order_ulong(new_size) + 1;
+ printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+ old_size, old_order, new_size, new_order);
+ assert(new_size < old_size);
+
+ /* Remove and unlink all dummy nodes to remove. */
+ fini_table(ht, new_order, old_order - new_order);
+}
+
+
/* called with resize mutex held */
static
void _do_cds_lfht_resize(struct cds_lfht *ht)
{
	unsigned long new_size, old_size;

	/*
	 * Resize table, re-do if the target size has changed under us.
	 */
	do {
		/* Announce that a resize is running before reading sizes. */
		ht->t.resize_initiated = 1;
		old_size = ht->t.size;
		new_size = CMM_LOAD_SHARED(ht->t.resize_target);
		if (old_size < new_size)
			_do_cds_lfht_grow(ht, old_size, new_size);
		else if (old_size > new_size)
			_do_cds_lfht_shrink(ht, old_size, new_size);
		/* old_size == new_size: nothing to do this iteration. */
		ht->t.resize_initiated = 0;
		/* write resize_initiated before read resize_target */
		cmm_smp_mb();
		/*
		 * Loop again if another thread moved the target while we
		 * were resizing; the barrier above pairs with the one in
		 * the lazy-resize request paths.
		 */
	} while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
}
+
+static
+unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
+ int growth_order)
+{
+ return _uatomic_max(&ht->t.resize_target,
+ size << growth_order);
+}
+
+static
+void resize_target_update_count(struct cds_lfht *ht,
+ unsigned long count)
+{
+ count = max(count, MIN_TABLE_SIZE);
+ uatomic_set(&ht->t.resize_target, count);
+}
+
/*
 * Synchronous resize to new_size buckets (clamped internally by the
 * shrink path). Serializes against concurrent resizes via resize_mutex.
 * NOTE(review): the thread is put RCU-offline around the mutex,
 * presumably so a blocked resizer cannot stall grace periods — confirm.
 */
void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
	resize_target_update_count(ht, new_size);
	/* Tell lazy requesters a resize is already underway. */
	CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
}
+
/*
 * RCU callback performing a deferred (lazy) resize. Scheduled by the
 * cds_lfht_resize_lazy* paths via cds_lfht_call_rcu; frees its work
 * item and drops the in_progress_resize count when done.
 */
static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct cds_lfht *ht = work->ht;

	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
	poison_free(work);
	cmm_smp_mb(); /* finish resize before decrement */
	/* Pairs with the uatomic_inc done by the enqueuing thread. */
	uatomic_dec(&ht->in_progress_resize);
}
+
+static
+void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
+{
+ struct rcu_resize_work *work;
+ unsigned long target_size;
+
+ target_size = resize_target_update(ht, size, growth);
+ /* Store resize_target before read resize_initiated */
+ cmm_smp_mb();
+ if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
+ uatomic_inc(&ht->in_progress_resize);
+ cmm_smp_mb(); /* increment resize count before calling it */
+ work = malloc(sizeof(*work));
+ work->ht = ht;
+ ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
+ CMM_STORE_SHARED(ht->t.resize_initiated, 1);
+ }
+}
+
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
+ unsigned long count)
+{
+ struct rcu_resize_work *work;
+
+ if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+ return;
+ resize_target_update_count(ht, count);
+ /* Store resize_target before read resize_initiated */
+ cmm_smp_mb();
+ if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
+ uatomic_inc(&ht->in_progress_resize);
+ cmm_smp_mb(); /* increment resize count before calling it */
+ work = malloc(sizeof(*work));
+ work->ht = ht;
+ ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
+ CMM_STORE_SHARED(ht->t.resize_initiated, 1);
+ }
+}
+
+#endif