+/*
+ * Holding RCU read lock to protect _cds_lfht_remove against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ */
+/*
+ * Tear down the top 'len_order' orders of the table, starting just below
+ * 'first_order + len_order' and working downward. For each order: shrink
+ * t->size first (so concurrent gc lookups stop at still-live dummy
+ * nodes), logically remove every dummy node of that order inside one RCU
+ * read-side critical section, then hand the level's memory to call_rcu
+ * for deferred reclaim. Both loops bail out early once
+ * ht->in_progress_destroy is observed set.
+ * NOTE(review): callers are presumably serialized (resize mutex) — this
+ * function does not itself guard concurrent shrinkers; confirm at call
+ * sites.
+ */
+static
+void fini_table(struct cds_lfht *ht, struct rcu_table *t,
+ unsigned long first_order, unsigned long len_order)
+{
+ long i, end_order;
+
+ dbg_printf("fini table: first_order %lu end_order %lu\n",
+ first_order, first_order + len_order);
+ end_order = first_order + len_order;
+ /* Order 0 (the single lowest dummy) is never torn down here. */
+ assert(first_order > 0);
+ assert(t->size == (1UL << (end_order - 1)));
+ for (i = end_order - 1; i >= first_order; i--) {
+ unsigned long j, len;
+
+ len = !i ? 1 : 1UL << (i - 1);
+ /* i is 'long': cast to match %lu (mismatch is UB, C11 7.21.6.1). */
+ dbg_printf("fini order %lu len: %lu\n", (unsigned long) i, len);
+ /*
+ * Update table size. Need to shrink this table prior to
+ * removal so gc lookups use non-logically-removed dummy
+ * nodes.
+ */
+ t->size = 1UL << (i - 1);
+ /* Unlink */
+ ht->cds_lfht_rcu_read_lock();
+ for (j = 0; j < len; j++) {
+ struct cds_lfht_node *fini_node =
+ (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
+
+ /* Cast i for %lu, as above. */
+ dbg_printf("fini entry: i %lu j %lu hash %lu\n",
+ (unsigned long) i, j, !i ? 0 : (1UL << (i - 1)) + j);
+ fini_node->p.reverse_hash =
+ bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+ /* Logical removal only; reclaim is deferred to call_rcu below. */
+ (void) _cds_lfht_remove(ht, t, fini_node, 1);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
+ }
+ ht->cds_lfht_rcu_read_unlock();
+ ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
+ dbg_printf("fini new size: %lu\n", t->size);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
+ }
+ t->resize_target = t->size;
+ t->resize_initiated = 0;
+}
+