+ assert(size > 0);
+ index = hash & (size - 1);
+ order = get_count_order_ulong(index + 1);
+
+ dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
+ hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
+
+ return &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+}
+
+/*
+ * Remove all logically deleted nodes from a bucket up to a certain node key.
+ *
+ * Walks the bucket chain starting at @dummy (the bucket head) and
+ * physically unlinks every node whose next pointer carries the "removed"
+ * flag, stopping as soon as a node with a reverse hash greater than
+ * @node's is reached.  After each unlink attempt (successful or not) the
+ * traversal restarts from @dummy, so concurrent updaters racing on the
+ * same chain are re-observed rather than assumed stable.
+ *
+ * Flag conventions (from the visible code): "removed" and "dummy" are
+ * bits stored in next-pointer values; clear_flag() strips them before
+ * dereferencing, is_end() detects the end-of-chain marker.
+ */
+static
+void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
+{
+ struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
+
+ /* Callers must pass real (non-dummy, not-yet-unlinked) chain anchors. */
+ assert(!is_dummy(dummy));
+ assert(!is_removed(dummy));
+ assert(!is_dummy(node));
+ assert(!is_removed(node));
+ for (;;) {
+ iter_prev = dummy;
+ /* We can always skip the dummy node initially */
+ iter = rcu_dereference(iter_prev->p.next);
+ /* Chain is ordered by reverse hash: the head cannot exceed node. */
+ assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+ /*
+ * We should never be called with dummy (start of chain)
+ * and logically removed node (end of path compression
+ * marker) being the actual same node. This would be a
+ * bug in the algorithm implementation.
+ */
+ assert(dummy != node);
+ for (;;) {
+ /* End-of-chain marker: nothing left to collect. */
+ if (unlikely(is_end(iter)))
+ return;
+ /* Passed node's position in the ordered chain: done. */
+ if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+ return;
+ next = rcu_dereference(clear_flag(iter)->p.next);
+ /*
+ * A "removed" flag on iter's next pointer means iter is
+ * logically deleted: break out to physically unlink it.
+ */
+ if (likely(is_removed(next)))
+ break;
+ /* iter is live: advance the (prev, cur) pair. */
+ iter_prev = clear_flag(iter);
+ iter = next;
+ }
+ /* iter was reached through a live link, so it carries no flag. */
+ assert(!is_removed(iter));
+ /* Preserve the dummy flag on the replacement link, if any. */
+ if (is_dummy(iter))
+ new_next = flag_dummy(clear_flag(next));
+ else
+ new_next = clear_flag(next);
+ /* NOTE(review): the assert above implies this branch never fires
+ * — looks like defensive carry-over of the removed flag; confirm. */
+ if (is_removed(iter))
+ new_next = flag_removed(new_next);
+ /*
+ * Try to splice iter out of the chain.  Failure means a
+ * concurrent update touched iter_prev->p.next; either way we
+ * restart the scan from the dummy head.
+ */
+ (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+ }
+ return;
+}
+
+/*
+ * Atomically replace @old_node by @new_node in the hash table.
+ *
+ * @ht:       hash table.
+ * @size:     current table size (used for the bucket lookup).
+ * @old_node: node to replace; may be NULL (treated as "not found").
+ * @ret_next: the successor of @old_node as observed by the caller's
+ *            lookup; used as the expected value of the first cmpxchg.
+ * @new_node: replacement node.
+ *
+ * Returns 0 when the replacement succeeded, -ENOENT when @old_node is
+ * NULL or was concurrently removed between the caller's lookup and this
+ * call.
+ */
+static
+int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
+ struct cds_lfht_node *old_node,
+ struct cds_lfht_node *ret_next,
+ struct cds_lfht_node *new_node)
+{
+ struct cds_lfht_node *dummy, *old_next;
+ struct _cds_lfht_node *lookup;
+ int flagged = 0; /* set once we win the cmpxchg, i.e. we did the replace */
+
+ if (!old_node) /* Return -ENOENT if asked to replace NULL node */
+ goto end;
+
+ assert(!is_removed(old_node));
+ assert(!is_dummy(old_node));
+ assert(!is_removed(new_node));
+ assert(!is_dummy(new_node));
+ assert(new_node != old_node);
+ do {
+ /* Insert after node to be replaced */
+ old_next = ret_next;
+ if (is_removed(old_next)) {
+ /*
+ * Too late, the old node has been removed under us
+ * between lookup and replace. Fail.
+ */
+ goto end;
+ }
+ assert(!is_dummy(old_next));
+ assert(new_node != clear_flag(old_next));
+ /* New node inherits the old node's successor. */
+ new_node->p.next = clear_flag(old_next);
+ /*
+ * Here is the whole trick for lock-free replace: we add
+ * the replacement node _after_ the node we want to
+ * replace by atomically setting its next pointer at the
+ * same time we set its removal flag. Given that
+ * the lookups/get next use an iterator aware of the
+ * next pointer, they will either skip the old node due
+ * to the removal flag and see the new node, or use
+ * the old node, but will not see the new one.
+ */
+ ret_next = uatomic_cmpxchg(&old_node->p.next,
+ old_next, flag_removed(new_node));
+ /* On mismatch, ret_next holds the freshly observed successor:
+ * loop to retry against it (or bail out if it became removed). */
+ } while (ret_next != old_next);
+
+ /* We performed the replacement. */
+ flagged = 1;
+
+ /*
+ * Ensure that the old node is not visible to readers anymore:
+ * lookup for the node, and remove it (along with any other
+ * logically removed node) if found.
+ */
+ lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
+ dummy = (struct cds_lfht_node *) lookup;
+ /* GC up to new_node's key: physically unlinks the flagged old node. */
+ _cds_lfht_gc_bucket(dummy, new_node);
+end:
+ /*
+ * Only the flagging action indicated that we (and no other)
+ * replaced the node from the hash table.
+ */
+ if (flagged) {
+ assert(is_removed(rcu_dereference(old_node->p.next)));
+ return 0;
+ } else {
+ return -ENOENT;
+ }
+}
+
+static
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
+ unsigned long size,
+ struct cds_lfht_node *node,
+ enum add_mode mode, int dummy)
+{
+ struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
+ *dummy_node, *return_node;
+ struct _cds_lfht_node *lookup;
+
+ assert(!is_dummy(node));
+ assert(!is_removed(node));
+ if (!size) {
+ assert(dummy);
+ node->p.next = flag_dummy(get_end());
+ return node; /* Initial first add (head) */
+ }
+ lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));