+ next = CMM_LOAD_SHARED(node->next); /* next is not dereferenced */
+ if (caa_unlikely(is_removed(next)))
+ return -ENOENT;
+ assert(!is_bucket(next));
+ /*
+ * The del operation semantics guarantee a full memory barrier
+ * before the uatomic_or atomic commit of the deletion flag.
+ */
+ cmm_smp_mb__before_uatomic_or();
+ /*
+ * We set the REMOVED_FLAG unconditionally. Note that there may
+ * be more than one concurrent thread setting this flag.
+ * Which one wins the race will only be known after the garbage
+ * collection phase, stay tuned!
+ */
+ uatomic_or(&node->next, REMOVED_FLAG);
+ /* We performed the (logical) deletion. */
+
+ /*
+ * Ensure that the node is not visible to readers anymore: look up
+ * the node, and remove it (along with any other logically removed
+ * node) if found.
+ */
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
+ _cds_lfht_gc_bucket(bucket, node);
+
+ assert(is_removed(CMM_LOAD_SHARED(node->next)));
+ /*
+ * Last phase: atomically exchange node->next with a version
+ * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
+ * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
+ * the node and win the removal race.
+ * It is worth noting that all "add" paths are forbidden to change
+ * the next pointer starting from the point where the REMOVED_FLAG
+ * is set, so a read followed by an xchg() here suffices to
+ * guarantee that the xchg() will only ever set the
+ * "REMOVAL_OWNER_FLAG" (or change nothing if the flag was already
+ * set).
+ */
+ if (!is_removal_owner(uatomic_xchg(&node->next,
+ flag_removal_owner(node->next))))
+ return 0;
+ else
+ return -ENOENT;
+}
+
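+/*
+ * Resize worker thread: register with the RCU flavor, apply the
+ * per-partition function to the [start, start + len) slice of bucket
+ * indices, then unregister before exiting.
+ */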
+static
+void *partition_resize_thread(void *arg)
+{
+ struct partition_resize_work *work = arg;
+
+ work->ht->flavor->register_thread();
+ work->fct(work->ht, work->i, work->start, work->len);
+ work->ht->flavor->unregister_thread();
+ return NULL;
+}
+
+static
+void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
+ unsigned long len,
+ void (*fct)(struct cds_lfht *ht, unsigned long i,
+ unsigned long start, unsigned long len))
+{
+ unsigned long partition_len;
+ struct partition_resize_work *work;
+ int thread, ret;
+ unsigned long nr_threads;
+
+ /*
+ * Note: nr_cpus_mask + 1 is always a power of 2.
+ * We spawn just the number of threads we need to satisfy the minimum
+ * partition size, up to the number of CPUs in the system.
+ */
+ if (nr_cpus_mask > 0) {
+ nr_threads = min(nr_cpus_mask + 1,
+ len >> MIN_PARTITION_PER_THREAD_ORDER);
+ } else {
+ nr_threads = 1;
+ }
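+ /*
+ * nr_threads is a power of 2, and so is len for every caller in this
+ * file, so the shift below is an exact division: each worker handles
+ * len / nr_threads buckets.
+ */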
+ partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
+ work = calloc(nr_threads, sizeof(*work));
+ if (!work) {
+ dbg_printf("error allocating for resize, single-threading\n");
+ goto fallback;
+ }
+ for (thread = 0; thread < nr_threads; thread++) {
+ work[thread].ht = ht;
+ work[thread].i = i;
+ work[thread].len = partition_len;
+ work[thread].start = thread * partition_len;
+ work[thread].fct = fct;
+ ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
+ partition_resize_thread, &work[thread]);
+ assert(!ret);
+ }
+ for (thread = 0; thread < nr_threads; thread++) {
+ ret = pthread_join(work[thread].thread_id, NULL);
+ assert(!ret);
+ }
+ free(work);
+ return;
+fallback:
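+ /*
+ * Could not allocate the worker array: perform the whole resize
+ * work with the current thread instead.
+ */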
+ ht->flavor->thread_online();
+ fct(ht, i, 0, len);
+ ht->flavor->thread_offline();
+}
+
+/*
+ * Holding RCU read lock to protect _cds_lfht_add against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ *
+ * When we reach a certain length, we can split this population phase over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should therefore keep the expand from lagging too far behind
+ * concurrent insertion threads, by relying on the scheduler's ability to
+ * schedule bucket node population fairly with insertions.
+ */
+static
+void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
+ unsigned long start, unsigned long len)
+{
+ unsigned long j, size = 1UL << (i - 1);
+
+ assert(i > MIN_TABLE_ORDER);
+ ht->flavor->read_lock();
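+ /*
+ * The buckets added at order i have indices in [size, 2 * size);
+ * this partition populates [size + start, size + start + len).
+ */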
+ for (j = size + start; j < size + start + len; j++) {
+ struct cds_lfht_node *new_node = bucket_at(ht, j);
+
+ assert(j >= size && j < (size << 1));
+ dbg_printf("init populate: order %lu index %lu hash %lu\n",
+ i, j, j);
+ new_node->reverse_hash = bit_reverse_ulong(j);
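+ /* Link the bucket node into the table (last argument: bucket flag). */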
+ _cds_lfht_add(ht, j, NULL, NULL, size, new_node, NULL, 1);
+ }
+ ht->flavor->read_unlock();
+}
+
+static
+void init_table_populate(struct cds_lfht *ht, unsigned long i,
+ unsigned long len)
+{
+ assert(nr_cpus_mask != -1);
+ if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
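+ /* Small table or unknown CPU count: populate with a single thread. */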
+ ht->flavor->thread_online();
+ init_table_populate_partition(ht, i, 0, len);
+ ht->flavor->thread_offline();
+ return;
+ }
+ partition_resize_helper(ht, i, len, init_table_populate_partition);
+}
+
+static
+void init_table(struct cds_lfht *ht,
+ unsigned long first_order, unsigned long last_order)
+{
+ unsigned long i;
+
+ dbg_printf("init table: first_order %lu last_order %lu\n",
+ first_order, last_order);
+ assert(first_order > MIN_TABLE_ORDER);
+ for (i = first_order; i <= last_order; i++) {
+ unsigned long len;
+
+ len = 1UL << (i - 1);
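+ /*
+ * Order i adds len = 2^(i-1) new buckets, doubling the table
+ * size from 2^(i-1) to 2^i.
+ */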
+ dbg_printf("init order %lu len: %lu\n", i, len);
+
+ /* Stop expand if the resize target changes under us */
+ if (CMM_LOAD_SHARED(ht->resize_target) < (1UL << i))
+ break;
+
+ cds_lfht_alloc_bucket_table(ht, i);
+
+ /*
+ * Set all bucket nodes reverse hash values for a level and
+ * link all bucket nodes into the table.
+ */
+ init_table_populate(ht, i, len);
+
+ /*
+ * Update table size.
+ */
+ cmm_smp_wmb(); /* populate data before RCU size */
+ CMM_STORE_SHARED(ht->size, 1UL << i);
+
+ dbg_printf("init new size: %lu\n", 1UL << i);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
+ }
+}
+
+/*
+ * Holding RCU read lock to protect _cds_lfht_remove against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ * For a single level, we logically remove and garbage collect each node.
+ *
+ * As a design choice, we perform logical removal and garbage collection on a
+ * node-per-node basis to simplify this algorithm. We also assume that keeping
+ * good cache locality for the operation outweighs the possible performance
+ * gain of batching garbage collection across multiple levels.
+ * However, this would have to be justified by benchmarks.
+ *
+ * Concurrent removal and add operations are helping us perform garbage
+ * collection of logically removed nodes. We guarantee that all logically
+ * removed nodes have been garbage-collected (unlinked) before call_rcu is
+ * invoked to free a whole level of bucket nodes (after a grace period).
+ *
+ * Logical removal and garbage collection can therefore be done in batch
+ * or on a node-per-node basis, as long as the guarantee above holds.
+ *
+ * When we reach a certain length, we can split this removal over many worker
+ * threads, based on the number of CPUs available in the system. This should
+ * take care of not letting the resize process lag behind too many concurrent
+ * updater threads actively inserting into the hash table.
+ */
+static
+void remove_table_partition(struct cds_lfht *ht, unsigned long i,
+ unsigned long start, unsigned long len)
+{
+ unsigned long j, size = 1UL << (i - 1);
+
+ assert(i > MIN_TABLE_ORDER);
+ ht->flavor->read_lock();
+ for (j = size + start; j < size + start + len; j++) {
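+ /*
+ * Bucket j belongs to order i; its parent bucket is the one whose
+ * index clears j's most significant bit, i.e. index j - size.
+ */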
+ struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
+ struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);
+
+ assert(j >= size && j < (size << 1));
+ dbg_printf("remove entry: order %lu index %lu hash %lu\n",
+ i, j, j);
+ /* Set the REMOVED_FLAG to freeze the ->next for gc */
+ uatomic_or(&fini_bucket->next, REMOVED_FLAG);
+ _cds_lfht_gc_bucket(parent_bucket, fini_bucket);
+ }
+ ht->flavor->read_unlock();
+}
+
+static
+void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
+{
+ assert(nr_cpus_mask != -1);
+ if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
+ ht->flavor->thread_online();
+ remove_table_partition(ht, i, 0, len);
+ ht->flavor->thread_offline();
+ return;
+ }
+ partition_resize_helper(ht, i, len, remove_table_partition);
+}
+
+/*
+ * fini_table() is never called for first_order == 0, which is why
+ * free_by_rcu_order == 0 can be used as a criterion to know whether free
+ * must be called.
+ */
+static
+void fini_table(struct cds_lfht *ht,
+ unsigned long first_order, unsigned long last_order)
+{
+ long i;
+ unsigned long free_by_rcu_order = 0;
+
+ dbg_printf("fini table: first_order %lu last_order %lu\n",
+ first_order, last_order);
+ assert(first_order > MIN_TABLE_ORDER);
+ for (i = last_order; i >= first_order; i--) {
+ unsigned long len;
+
+ len = 1UL << (i - 1);
+ dbg_printf("fini order %lu len: %lu\n", i, len);
+
+ /* Stop shrink if the resize target changes under us */
+ if (CMM_LOAD_SHARED(ht->resize_target) > (1UL << (i - 1)))
+ break;
+
+ cmm_smp_wmb(); /* populate data before RCU size */
+ CMM_STORE_SHARED(ht->size, 1UL << (i - 1));
+
+ /*
+ * We need to wait for all add operations to reach Q.S. (and
+ * thus use the new table for lookups) before we can start
+ * releasing the old bucket nodes. Otherwise their lookup would
+ * return a logically removed node as the insert position.
+ */
+ ht->flavor->update_synchronize_rcu();
+ if (free_by_rcu_order)
+ cds_lfht_free_bucket_table(ht, free_by_rcu_order);
+
+ /*
+ * Set "removed" flag in bucket nodes about to be removed.
+ * Unlink all now-logically-removed bucket node pointers.
+ * Concurrent add/remove operation are helping us doing
+ * the gc.
+ */
+ remove_table(ht, i, len);
+
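+ /*
+ * The bucket table for order i is freed only after the next grace
+ * period: either on the next loop iteration or after the loop.
+ */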
+ free_by_rcu_order = i;
+
+ dbg_printf("fini new size: %lu\n", 1UL << i);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
+ }
+
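+ /*
+ * Wait for a final grace period before freeing the last level
+ * that was logically removed above.
+ */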
+ if (free_by_rcu_order) {
+ ht->flavor->update_synchronize_rcu();
+ cds_lfht_free_bucket_table(ht, free_by_rcu_order);
+ }