b44b9e29fdbad9b73024d653fb992218f221644b
[urcu.git] / include / urcu / static / rculfstack.h
1 // SPDX-FileCopyrightText: 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
2 //
3 // SPDX-License-Identifier: LGPL-2.1-or-later
4
5 #ifndef _URCU_RCULFSTACK_STATIC_H
6 #define _URCU_RCULFSTACK_STATIC_H
7
8 /*
9 * Userspace RCU library - Lock-Free RCU Stack
10 *
11 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfstack.h for linking
12 * dynamically with the userspace rcu library.
13 */
14
15 #include <urcu/uatomic.h>
16 #include <urcu-pointer.h>
17
18 #ifdef __cplusplus
19 extern "C" {
20 #endif
21
static inline
void _cds_lfs_node_init_rcu(struct cds_lfs_node_rcu *node __attribute__((unused)))
{
	/*
	 * Intentionally empty: a node needs no per-field setup before
	 * being pushed (its "next" pointer is overwritten by push).
	 * The function exists so the API stays symmetric and so future
	 * node state can be added without changing callers.
	 */
}
26
static inline
void _cds_lfs_init_rcu(struct cds_lfs_stack_rcu *s)
{
	/*
	 * An empty stack is represented by a NULL head pointer.  Plain
	 * (non-atomic) store: initialization must happen before the
	 * stack is shared with other threads.
	 */
	s->head = NULL;
}
32
33 /*
34 * Lock-free stack push is not subject to ABA problem, so no need to
35 * take the RCU read-side lock. Even if "head" changes between two
36 * uatomic_cmpxchg() invocations here (being popped, and then pushed
37 * again by one or more concurrent threads), the second
38 * uatomic_cmpxchg() invocation only cares about pushing a new entry at
39 * the head of the stack, ensuring consistency by making sure the new
40 * node->next is the same pointer value as the value replaced as head.
41 * It does not care about the content of the actual next node, so it can
42 * very well be reallocated between the two uatomic_cmpxchg().
43 *
44 * We take the approach of expecting the stack to be usually empty, so
45 * we first try an initial uatomic_cmpxchg() on a NULL old_head, and
46 * retry if the old head was non-NULL (the value read by the first
47 * uatomic_cmpxchg() is used as old head for the following loop). The
48 * upside of this scheme is to minimize the amount of cacheline traffic,
49 * always performing an exclusive cacheline access, rather than doing
50 * non-exclusive followed by exclusive cacheline access (which would be
51 * required if we first read the old head value). This design decision
52 * might be revisited after more thorough benchmarking on various
53 * platforms.
54 *
55 * Returns 0 if the stack was empty prior to adding the node.
56 * Returns non-zero otherwise.
57 */
static inline
int _cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s,
		  struct cds_lfs_node_rcu *node)
{
	/*
	 * Optimistically expect an empty stack on the first cmpxchg
	 * attempt (see the rationale in the comment above this
	 * function).
	 */
	struct cds_lfs_node_rcu *head = NULL;

	for (;;) {
		struct cds_lfs_node_rcu *old_head = head;

		/*
		 * Link the new node in front of the head value we
		 * expect.  If s->head has changed, the cmpxchg below
		 * fails and we redo this store with the fresh value,
		 * so node->next always matches the head actually
		 * replaced on success.
		 */
		node->next = head;
		/*
		 * uatomic_cmpxchg() implicit memory barrier orders earlier
		 * stores to node before publication.
		 */
		cmm_emit_legacy_smp_mb();
		head = uatomic_cmpxchg_mo(&s->head, old_head, node,
				CMM_SEQ_CST, CMM_SEQ_CST);
		if (old_head == head)
			break;	/* Success: node is now the stack head. */
		/* Lost a race; retry using the head value just read. */
	}
	/* head is the pre-push top: NULL (returns 0) iff stack was empty. */
	return (int) !!((unsigned long) head);
}
80
81 /*
82 * Should be called under rcu read-side lock.
83 *
84 * The caller must wait for a grace period to pass before freeing the returned
85 * node or modifying the cds_lfs_node_rcu structure.
86 * Returns NULL if stack is empty.
87 */
static inline
struct cds_lfs_node_rcu *
_cds_lfs_pop_rcu(struct cds_lfs_stack_rcu *s)
{
	/*
	 * CAS retry loop: re-read the head each iteration until we
	 * either observe an empty stack or successfully swing s->head
	 * from the observed head to its successor.  The RCU read-side
	 * lock (required of the caller, see comment above) keeps
	 * "head" from being freed while we dereference head->next.
	 */
	for (;;) {
		struct cds_lfs_node_rcu *head;

		head = rcu_dereference(s->head);
		if (head) {
			struct cds_lfs_node_rcu *next = rcu_dereference(head->next);

			/* Only the thread whose cmpxchg succeeds owns "head". */
			if (uatomic_cmpxchg_mo(&s->head, head, next,
					CMM_SEQ_CST, CMM_SEQ_CST) == head) {
				cmm_emit_legacy_smp_mb();
				return head;
			} else {
				/* Concurrent modification. Retry. */
				continue;
			}
		} else {
			/* Empty stack */
			return NULL;
		}
	}
}
113
114 #ifdef __cplusplus
115 }
116 #endif
117
118 #endif /* _URCU_RCULFSTACK_STATIC_H */
This page took 0.03512 seconds and 5 git commands to generate.