+ }
+
+ replace:
+ assert(node != clear_flag(iter));
+ assert(!is_removed(iter_prev));
+ assert(!is_removed(iter));
+ assert(!is_gc(iter_prev));
+ assert(!is_gc(iter));
+ assert(iter_prev != node);
+ assert(!dummy);
+ node->p.next = clear_flag(next);
+ if (is_dummy(iter))
+ new_node = flag_dummy(node);
+ else
+ new_node = node;
+ /*
+ * Try to delete to-be-replaced node. Don't gc yet. Not
+ * performing gc here is important, because this lets
+ * concurrent lookups see the old node until we
+ * atomically swap the new node into its place.
+ *
+ * This algorithm is _not_ strictly lock-free between
+ * _cds_lfht_del and the uatomic_cmpxchg of the
+ * replacement operation, so a replacement should _not_
+ * crash here (which means: don't do replacements if you
+ * need strict lock-free guarantees).
+ */
+ if (!replace_pinned) {
+ if (_cds_lfht_del(ht, size, clear_flag(iter), 0, 0))
+ continue; /* concurrently removed. retry. */
+ }
+ /*
+ * After _cds_lfht_del succeeds, we have pinned the
+ * to-be-removed node in place by setting its removed
+ * flag, but not its gc flag. If we fail to cmpxchg our
+ * new node with this node, we need to retry everything
+ * from the initial lookup, and only stop when we reach
+ * the node we pinned into place.
+ */
+ return_node = uatomic_cmpxchg(&iter_prev->p.next,
+ iter, new_node);
+ if (return_node != iter) {
+ /*
+ * If cmpxchg fails, we need to do path
+ * compression, but end it by placing our own
+ * node into place.
+ */
+ replace_pinned = clear_flag(iter);
+ continue; /* retry */
+ } else {
+ /*
+ * cmpxchg succeeded. gc unnecessary, because we
+		 * unlinked the return_node ourselves with the
+ * cmpxchg.
+ */
+ return_node = clear_flag(return_node);
+ goto end;
+ }
+