 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
 * linking dynamically with the userspace rcu library.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * always performing an exclusive cacheline access, rather than doing
 * non-exclusive followed by exclusive cacheline access (which would be
 * required if we first read the old head value). This design decision
struct cds_lfs_head *new_head =
caa_container_of(node, struct cds_lfs_head, node);
struct cds_lfs_head *new_head =
caa_container_of(node, struct cds_lfs_head, node);
for (;;) {
struct cds_lfs_head *head, *next_head;
struct cds_lfs_node *next;
for (;;) {
struct cds_lfs_head *head, *next_head;
struct cds_lfs_node *next;
	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_cmpxchg() in cds_lfs_push. It
	 * taking care to order writes to each node prior to the full
	 * memory barrier after this uatomic_xchg().
	 */