+}
+
+/*
+ * We hold the RCU read lock to protect _cds_lfht_del against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ * For a single level, we logically remove and garbage collect each node.
+ *
+ * As a design choice, we perform logical removal and garbage collection on a
+ * node-per-node basis to simplify this algorithm. We also assume that keeping
+ * good cache locality for the operation outweighs the possible performance
+ * gain that could be achieved by batching garbage collection for multiple
+ * levels. However, this would have to be justified by benchmarks.
+ *
+ * Concurrent removal and add operations help us perform garbage collection
+ * of logically removed nodes. We guarantee that all logically removed nodes
+ * have been garbage-collected (unlinked) before call_rcu is invoked to free
+ * a whole level of dummy nodes (after a grace period).
+ *
+ * Logical removal and garbage collection can therefore be done in batch or on a
+ * node-per-node basis, as long as the guarantee above holds.
+ *
+ * When the table reaches a certain length, we can split this removal over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should keep the resize process from lagging behind too many concurrent
+ * updater threads actively inserting into the hash table. (See the
+ * illustrative sketch following this comment.)
+ */
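+
+/*
+ * Illustrative sketch only (not part of this change) of one way the
+ * function below could be fanned out over worker threads, one partition
+ * of the [0, len) index range per thread. The names
+ * remove_partition_thread/remove_table_parallel and the even-split
+ * heuristic are assumptions for illustration; <pthread.h> is assumed
+ * included, and each worker must be registered with the RCU flavor
+ * before calling in.
+ */
+#if 0	/* sketch, never compiled */
+struct remove_partition_args {
+	struct cds_lfht *ht;
+	unsigned long i, start, len;
+};
+
+static
+void *remove_partition_thread(void *arg)
+{
+	struct remove_partition_args *a = arg;
+
+	remove_table_partition(a->ht, a->i, a->start, a->len);
+	return NULL;
+}
+
+static
+void remove_table_parallel(struct cds_lfht *ht, unsigned long i,
+		unsigned long len, unsigned long nr_threads)
+{
+	struct remove_partition_args args[nr_threads];
+	pthread_t tids[nr_threads];
+	unsigned long partition_len = len / nr_threads, t;
+
+	/* Caller ensures 0 < nr_threads <= len. */
+	for (t = 0; t < nr_threads; t++) {
+		args[t] = (struct remove_partition_args) {
+			.ht = ht, .i = i,
+			.start = t * partition_len,
+			/* Last partition absorbs the remainder. */
+			.len = (t == nr_threads - 1) ?
+				len - t * partition_len : partition_len,
+		};
+		(void) pthread_create(&tids[t], NULL,
+				remove_partition_thread, &args[t]);
+	}
+	for (t = 0; t < nr_threads; t++)
+		(void) pthread_join(tids[t], NULL);
+}
+#endif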
+static
+void remove_table_partition(struct cds_lfht *ht, unsigned long i,
+ unsigned long start, unsigned long len)
+{
+ unsigned long j;
+
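+	/* Levels at or below the minimum allocation order are never removed. */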
+ assert(i > ht->min_alloc_order);
+ ht->cds_lfht_rcu_read_lock();
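+	/* Logically remove and gc each dummy node in [start, start + len). */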
+ for (j = start; j < start + len; j++) {
+ struct cds_lfht_node *fini_node =
+ (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+
+ dbg_printf("remove entry: i %lu j %lu hash %lu\n",
+ i, j, (1UL << (i - 1)) + j);
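+		/*
+		 * Dummy node for bucket (1UL << (i - 1)) + j: set its
+		 * reverse hash, then logically remove it from a table of
+		 * size 1UL << (i - 1) (dummy node removal).
+		 */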
+ fini_node->p.reverse_hash =
+ bit_reverse_ulong((1UL << (i - 1)) + j);
+ (void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
+ }
+ ht->cds_lfht_rcu_read_unlock();