+ /*
+ * Update table size.
+ */
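+ /*
+ * Order i corresponds to a size of 2^i entries; the ternary below
+ * only spells out the order-0 (single-entry) case.
+ */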
+ cmm_smp_wmb(); /* populate data before RCU size */
+ CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));
+
+ dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
+ }
+}
+
+/*
+ * Holding RCU read lock to protect _cds_lfht_remove against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ * For a single level, we logically remove and garbage collect each node.
+ *
+ * As a design choice, we perform logical removal and garbage collection on a
+ * node-per-node basis to simplify this algorithm. We also assume that keeping
+ * good cache locality of the operation would outweigh the possible performance
+ * gain of batching garbage collection for multiple levels. However, this
+ * would have to be justified by benchmarks.
+ *
+ * Concurrent removal and add operations help us perform garbage collection of
+ * logically removed nodes. We guarantee that all logically removed nodes have
+ * been garbage-collected (unlinked) before call_rcu is invoked to free a
+ * whole level of dummy nodes (after a grace period).
+ *
+ * Logical removal and garbage collection can therefore be done in batch or on a
+ * node-per-node basis, as long as the guarantee above holds.
+ */
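+
+/*
+ * As a reminder of the convention this relies on (a sketch; the actual
+ * helpers are defined earlier in this file): logical removal tags the
+ * low bit of a node's next pointer, roughly:
+ *
+ *	is_removed(node):	((unsigned long) node) & REMOVED_FLAG
+ *	flag_removed(node):	set REMOVED_FLAG in the pointer value
+ *
+ * Any traversal that loads a flagged next pointer treats the node as
+ * logically dead and may unlink it, which is how concurrent add/remove
+ * operations end up helping with garbage collection.
+ */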
+static
+void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
+{
+ unsigned long j;
+
+ ht->cds_lfht_rcu_thread_online();
+ ht->cds_lfht_rcu_read_lock();
+ for (j = 0; j < len; j++) {
+ struct cds_lfht_node *fini_node =
+ (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+
+ dbg_printf("remove entry: i %lu j %lu hash %lu\n",
+ i, j, !i ? 0 : (1UL << (i - 1)) + j);
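+ /*
+ * Order i > 0 holds the dummy nodes covering hashes
+ * (1UL << (i - 1)) + j for j in [0, len); e.g. order 3, j = 2
+ * corresponds to hash 6. Order 0 holds the single hash-0 node.
+ */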
+ fini_node->p.reverse_hash =
+ bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+ (void) _cds_lfht_remove(ht, !i ? 0 : (1UL << (i - 1)),
+ fini_node, 1);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
+ }
+ ht->cds_lfht_rcu_read_unlock();
+ ht->cds_lfht_rcu_thread_offline();
+}
+
+static
+void fini_table(struct cds_lfht *ht,
+ unsigned long first_order, unsigned long len_order)
+{
+ long i, end_order;
+
+ dbg_printf("fini table: first_order %lu end_order %lu\n",
+ first_order, first_order + len_order);
+ end_order = first_order + len_order;
+ assert(first_order > 0);
+ for (i = end_order - 1; i >= first_order; i--) {
+ unsigned long len;
+
+ len = !i ? 1 : 1UL << (i - 1);
+ dbg_printf("fini order %lu len: %lu\n", i, len);
+
+ /* Stop shrink if the resize target changes under us */
+ if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
+ break;
+
+ cmm_smp_wmb(); /* populate data before RCU size */
+ CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
+
+ /*
+ * We need to wait for all add operations to reach Q.S. (and
+ * thus use the new table for lookups) before we can start
+ * releasing the old dummy nodes. Otherwise their lookup will
+ * return a logically removed node as insert position.
+ */
+ ht->cds_lfht_synchronize_rcu();
+
+ /*
+ * Set "removed" flag in dummy nodes about to be removed.
+ * Unlink all now-logically-removed dummy node pointers.
+ * Concurrent add/remove operations help us perform
+ * the gc.
+ */
+ remove_table(ht, i, len);
+
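+ /*
+ * All dummy nodes of this level are now unlinked. Defer freeing
+ * the level's memory until a grace period has elapsed, so no
+ * pre-existing RCU reader can still reference it.
+ */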
+ ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level);
+
+ dbg_printf("fini new size: %lu\n", 1UL << i);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
+ }
+}
+
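+/*
+ * Illustrative call sketch (not part of this patch): my_hash_fct and
+ * my_compare_fct stand for hypothetical user-provided callbacks; the
+ * RCU functions come from whichever urcu flavor the caller links
+ * against:
+ *
+ *	ht = cds_lfht_new(my_hash_fct, my_compare_fct, 0x42UL,
+ *			1UL << 8, CDS_LFHT_AUTO_RESIZE,
+ *			call_rcu, synchronize_rcu,
+ *			rcu_read_lock, rcu_read_unlock,
+ *			rcu_thread_offline, rcu_thread_online);
+ */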
+struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+ cds_lfht_compare_fct compare_fct,
+ unsigned long hash_seed,
+ unsigned long init_size,
+ int flags,
+ void (*cds_lfht_call_rcu)(struct rcu_head *head,
+ void (*func)(struct rcu_head *head)),
+ void (*cds_lfht_synchronize_rcu)(void),
+ void (*cds_lfht_rcu_read_lock)(void),
+ void (*cds_lfht_rcu_read_unlock)(void),
+ void (*cds_lfht_rcu_thread_offline)(void),
+ void (*cds_lfht_rcu_thread_online)(void))
+{
+ struct cds_lfht *ht;
+ unsigned long order;
+
+ /* init_size, when non-zero, must be a power of two */
+ if (init_size && (init_size & (init_size - 1)))
+ return NULL;
+ ht = calloc(1, sizeof(struct cds_lfht));