assert(!ret);
}
+/*
+ * ___cds_lfs_init: initialize lock-free stack.
+ */
+static inline
+void ___cds_lfs_init(struct __cds_lfs_stack *s)
+{
+ s->head = NULL;
+}
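+
+/*
+ * Usage sketch (illustration only): initialize a stack embedded in an
+ * application-defined structure before any concurrent access. The
+ * "struct mystruct" type is hypothetical.
+ *
+ *	struct mystruct {
+ *		struct __cds_lfs_stack stack;
+ *	};
+ *
+ *	void mystruct_init(struct mystruct *m)
+ *	{
+ *		___cds_lfs_init(&m->stack);
+ *	}
+ */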
+
static inline
bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
{
* No memory barrier is issued. No mutual exclusion is required.
*/
static inline
-bool _cds_lfs_empty(struct cds_lfs_stack *s)
+bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
{
- return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s->head));
+ return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s._s->head));
}
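+
+/*
+ * Usage sketch (illustration only): cds_lfs_stack_ptr_t is expected
+ * to be declared as a transparent union of both stack pointer types,
+ * along the lines of:
+ *
+ *	typedef union {
+ *		struct __cds_lfs_stack *_s;
+ *		struct cds_lfs_stack *s;
+ *	} __attribute__((__transparent_union__)) cds_lfs_stack_ptr_t;
+ *
+ * so callers can pass either stack flavor directly:
+ *
+ *	struct cds_lfs_stack a;		(stack with internal mutex)
+ *	struct __cds_lfs_stack b;	(mutex-free stack)
+ *
+ *	(void) _cds_lfs_empty(&a);
+ *	(void) _cds_lfs_empty(&b);
+ */
+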
/*
* always performing an exclusive cacheline access, rather than doing
* non-exclusive followed by exclusive cacheline access (which would be
* required if we first read the old head value). This design decision
- * might be revisited after more throrough benchmarking on various
+ * might be revisited after more thorough benchmarking on various
* platforms.
*
* Returns 0 if the stack was empty prior to adding the node.
* Returns non-zero otherwise.
*/
static inline
-bool _cds_lfs_push(struct cds_lfs_stack *s,
+bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
struct cds_lfs_node *node)
{
+ struct __cds_lfs_stack *s = u_s._s;
struct cds_lfs_head *head = NULL;
struct cds_lfs_head *new_head =
caa_container_of(node, struct cds_lfs_head, node);
if (old_head == head)
break;
}
- return ___cds_lfs_empty_head(head);
+ return !___cds_lfs_empty_head(head);
}
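+
+/*
+ * Usage sketch (illustration only): the return value can be used to
+ * detect an empty -> non-empty transition, e.g. to wake up a single
+ * consumer. "struct myobj" and wake_consumer() are hypothetical; "s"
+ * is the caller's stack.
+ *
+ *	struct myobj *obj = ...;
+ *
+ *	if (!_cds_lfs_push(s, &obj->node))
+ *		wake_consumer();	(stack was empty before this push)
+ */
+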
/*
* __cds_lfs_pop needs to be synchronized using one of the following
* techniques:
*
- * 1) Calling __cds_lfs_pop under rcu read lock critical section. The
- * caller must wait for a grace period to pass before freeing the
- * returned node or modifying the cds_lfs_node structure.
+ * 1) Calling __cds_lfs_pop under rcu read lock critical section.
+ * Both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
+ * grace period to pass before freeing the returned node or pushing
+ * the node back into the stack. It is valid to overwrite the content
+ * of cds_lfs_node immediately after __cds_lfs_pop and
+ * __cds_lfs_pop_all. No RCU read-side critical section is needed
+ * around __cds_lfs_pop_all. (See the usage sketch following this
+ * comment block.)
* 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop
* and __cds_lfs_pop_all callers.
* 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
* __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
*/
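+
+/*
+ * Usage sketch of technique 1 (illustration only): pop under the RCU
+ * read-side lock, then wait for a grace period before freeing the
+ * node. Assumes liburcu's rcu_read_lock()/rcu_read_unlock()/
+ * synchronize_rcu(); "struct myobj" is hypothetical.
+ *
+ *	struct cds_lfs_node *node;
+ *
+ *	rcu_read_lock();
+ *	node = ___cds_lfs_pop(s);
+ *	rcu_read_unlock();
+ *	if (node) {
+ *		synchronize_rcu();
+ *		free(caa_container_of(node, struct myobj, node));
+ *	}
+ */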
static inline
-struct cds_lfs_node *___cds_lfs_pop(struct cds_lfs_stack *s)
+struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
{
+ struct __cds_lfs_stack *s = u_s._s;
+
for (;;) {
struct cds_lfs_head *head, *next_head;
struct cds_lfs_node *next;
* matching the technique used to synchronize __cds_lfs_pop:
*
* 1) If __cds_lfs_pop is called under rcu read lock critical section,
- * both __cds_lfs_pop and cds_lfs_pop_all callers must wait for a
- * grace period to pass before freeing the returned node or modifying
- * the cds_lfs_node structure. However, no RCU read-side critical
- * section is needed around __cds_lfs_pop_all.
+ * both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
+ * grace period to pass before freeing the returned node or pushing
+ * the node back into the stack. It is valid to overwrite the content
+ * of cds_lfs_node immediately after __cds_lfs_pop and
+ * __cds_lfs_pop_all. No RCU read-side critical section is needed
+ * around __cds_lfs_pop_all. (See the usage sketch following this
+ * comment block.)
* 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop and
* __cds_lfs_pop_all callers.
* 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
* __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
*/
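+
+/*
+ * Usage sketch (illustration only): detach every node in a single
+ * operation, then iterate over the returned list without locking.
+ * Assumes the bottom node's next pointer reads as NULL (the
+ * cds_lfs_node is the first member of cds_lfs_head); "struct myobj"
+ * and myobj_free() are hypothetical. When technique 1 is used, the
+ * grace period below is still required before freeing.
+ *
+ *	struct cds_lfs_head *head = ___cds_lfs_pop_all(s);
+ *	struct cds_lfs_node *node, *next;
+ *
+ *	synchronize_rcu();
+ *	for (node = head ? &head->node : NULL; node; node = next) {
+ *		next = node->next;
+ *		myobj_free(caa_container_of(node, struct myobj, node));
+ *	}
+ */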
static inline
-struct cds_lfs_head *___cds_lfs_pop_all(struct cds_lfs_stack *s)
+struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
{
+ struct __cds_lfs_stack *s = u_s._s;
+
/*
* Implicit memory barrier after uatomic_xchg() matches implicit
* memory barrier before uatomic_cmpxchg() in cds_lfs_push. It