// SPDX-FileCopyrightText: 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_RCULFSTACK_STATIC_H
#define _URCU_RCULFSTACK_STATIC_H
/*
 * Userspace RCU library - Lock-Free RCU Stack
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfstack.h for linking
 * dynamically with the userspace rcu library.
 */
#include <urcu/uatomic.h>
#include <urcu-pointer.h>

#ifdef __cplusplus
extern "C" {
#endif
static inline
void _cds_lfs_node_init_rcu(struct cds_lfs_node_rcu *node __attribute__((unused)))
{
}
static inline
void _cds_lfs_init_rcu(struct cds_lfs_stack_rcu *s)
{
	s->head = NULL;
}
/*
 * Lock-free stack push is not subject to ABA problem, so no need to
 * take the RCU read-side lock. Even if "head" changes between two
 * uatomic_cmpxchg() invocations here (being popped, and then pushed
 * again by one or more concurrent threads), the second
 * uatomic_cmpxchg() invocation only cares about pushing a new entry at
 * the head of the stack, ensuring consistency by making sure the new
 * node->next is the same pointer value as the value replaced as head.
 * It does not care about the content of the actual next node, so it can
 * very well be reallocated between the two uatomic_cmpxchg().
 *
 * We take the approach of expecting the stack to be usually empty, so
 * we first try an initial uatomic_cmpxchg() on a NULL old_head, and
 * retry if the old head was non-NULL (the value read by the first
 * uatomic_cmpxchg() is used as old head for the following loop). The
 * upside of this scheme is to minimize the amount of cacheline traffic,
 * always performing an exclusive cacheline access, rather than doing
 * non-exclusive followed by exclusive cacheline access (which would be
 * required if we first read the old head value). This design decision
 * might be revisited after more thorough benchmarking on various
 * architectures.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
int _cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s,
		  struct cds_lfs_node_rcu *node)
{
	struct cds_lfs_node_rcu *head = NULL;

	for (;;) {
		struct cds_lfs_node_rcu *old_head = head;

		node->next = head;
		/*
		 * uatomic_cmpxchg() implicit memory barrier orders earlier
		 * stores to node before publication.
		 */
		cmm_emit_legacy_smp_mb();
		head = uatomic_cmpxchg_mo(&s->head, old_head, node,
					CMM_SEQ_CST, CMM_SEQ_CST);
		if (old_head == head)
			break;
	}
	return (int) !!((unsigned long) head);
}
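
/*
 * A minimal usage sketch (illustrative, not part of the upstream header):
 * push can be called from any context without holding the RCU read-side
 * lock. The node is meant to be embedded in a caller-defined structure;
 * "struct myitem" below is a hypothetical name used only for this example.
 *
 *	struct myitem {
 *		int value;
 *		struct cds_lfs_node_rcu snode;
 *	};
 *
 *	struct cds_lfs_stack_rcu stack;
 *	struct myitem *item;
 *
 *	_cds_lfs_init_rcu(&stack);
 *	item = malloc(sizeof(*item));
 *	item->value = 42;
 *	_cds_lfs_node_init_rcu(&item->snode);
 *	(void) _cds_lfs_push_rcu(&stack, &item->snode);
 */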
/*
 * Should be called under rcu read-side lock.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfs_node_rcu structure.
 * Returns NULL if stack is empty.
 */
static inline
struct cds_lfs_node_rcu *
_cds_lfs_pop_rcu(struct cds_lfs_stack_rcu *s)
{
	for (;;) {
		struct cds_lfs_node_rcu *head;

		head = rcu_dereference(s->head);
		if (head) {
			struct cds_lfs_node_rcu *next = rcu_dereference(head->next);

			if (uatomic_cmpxchg_mo(&s->head, head, next,
					CMM_SEQ_CST, CMM_SEQ_CST) == head) {
				cmm_emit_legacy_smp_mb();
				return head;
			} else {
				/* Concurrent modification. Retry. */
				continue;
			}
		} else {
			/* Empty stack */
			return NULL;
		}
	}
}
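
/*
 * A minimal pop-side sketch (illustrative, not part of the upstream header):
 * pop must run under the RCU read-side lock, and a grace period must elapse
 * before the popped node is freed. "struct myitem" and its "snode" member
 * are the hypothetical embedding from the push sketch above;
 * caa_container_of() comes from <urcu/compiler.h>.
 *
 *	struct cds_lfs_node_rcu *popped;
 *
 *	rcu_read_lock();
 *	popped = _cds_lfs_pop_rcu(&stack);
 *	rcu_read_unlock();
 *	if (popped) {
 *		struct myitem *item = caa_container_of(popped, struct myitem, snode);
 *
 *		synchronize_rcu();	// wait for a grace period before freeing
 *		free(item);
 *	}
 */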
#ifdef __cplusplus
}
#endif

#endif /* _URCU_RCULFSTACK_STATIC_H */