// SPDX-FileCopyrightText: 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_RCULFSTACK_STATIC_H
#define _URCU_RCULFSTACK_STATIC_H

/*
 * Userspace RCU library - Lock-Free RCU Stack
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfstack.h for linking
 * dynamically with the userspace rcu library.
 */

#include <urcu/uatomic.h>
#include <urcu-pointer.h>

#ifdef __cplusplus
extern "C" {
#endif

static inline
void _cds_lfs_node_init_rcu(struct cds_lfs_node_rcu *node __attribute__((unused)))
{
}

static inline
void _cds_lfs_init_rcu(struct cds_lfs_stack_rcu *s)
{
	s->head = NULL;
}
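
/*
 * Example (illustrative sketch, not part of this header's API): a stack
 * is declared by the caller and must be initialized before first use.
 *
 *	struct cds_lfs_stack_rcu stack;
 *
 *	_cds_lfs_init_rcu(&stack);
 */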

/*
 * Lock-free stack push is not subject to the ABA problem, so there is
 * no need to take the RCU read-side lock. Even if "head" changes
 * between two uatomic_cmpxchg() invocations here (being popped, and
 * then pushed again by one or more concurrent threads), the second
 * uatomic_cmpxchg() invocation only cares about pushing a new entry at
 * the head of the stack, ensuring consistency by making sure the new
 * node->next is the same pointer value as the value it replaces as
 * head. It does not care about the content of the actual next node, so
 * that node can very well be reallocated between the two
 * uatomic_cmpxchg() invocations.
 *
 * We take the approach of expecting the stack to be usually empty, so
 * we first try an initial uatomic_cmpxchg() on a NULL old_head, and
 * retry if the old head was non-NULL (the value read by the first
 * uatomic_cmpxchg() is used as the old head for the following loop).
 * The upside of this scheme is to minimize the amount of cacheline
 * traffic, always performing an exclusive cacheline access, rather
 * than doing a non-exclusive access followed by an exclusive one
 * (which would be required if we first read the old head value). This
 * design decision might be revisited after more thorough benchmarking
 * on various platforms.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
int _cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s,
		struct cds_lfs_node_rcu *node)
{
	struct cds_lfs_node_rcu *head = NULL;

	for (;;) {
		struct cds_lfs_node_rcu *old_head = head;

		node->next = head;
		/*
		 * uatomic_cmpxchg() implicit memory barrier orders earlier
		 * stores to node before publication.
		 */
		head = uatomic_cmpxchg(&s->head, old_head, node);
		if (old_head == head)
			break;
	}
	return (int) !!((unsigned long) head);
}
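
/*
 * Example (illustrative sketch, not part of this header's API): the
 * stack is intrusive, so callers embed a cds_lfs_node_rcu in their own
 * type and push that node. "struct myobj" and push_obj() are
 * hypothetical names used for this example only.
 *
 *	struct myobj {
 *		int value;
 *		struct cds_lfs_node_rcu node;
 *	};
 *
 *	static int push_obj(struct cds_lfs_stack_rcu *s, struct myobj *obj)
 *	{
 *		_cds_lfs_node_init_rcu(&obj->node);
 *		return _cds_lfs_push_rcu(s, &obj->node);
 *	}
 *
 * The return value is 0 if the stack was empty before the push, which
 * can be used, e.g., to wake a waiter on an empty-to-non-empty
 * transition. No RCU read-side lock is needed around the push, as
 * explained above.
 */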

/*
 * Should be called under the RCU read-side lock.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfs_node_rcu structure.
 * Returns NULL if the stack is empty.
 */
static inline
struct cds_lfs_node_rcu *
_cds_lfs_pop_rcu(struct cds_lfs_stack_rcu *s)
{
	for (;;) {
		struct cds_lfs_node_rcu *head;

		head = rcu_dereference(s->head);
		if (head) {
			struct cds_lfs_node_rcu *next = rcu_dereference(head->next);

			if (uatomic_cmpxchg(&s->head, head, next) == head) {
				return head;
			} else {
				/* Concurrent modification. Retry. */
				continue;
			}
		} else {
			/* Empty stack */
			return NULL;
		}
	}
}
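
/*
 * Example (illustrative sketch, not part of this header's API): pop
 * under the RCU read-side lock, then wait for a grace period before
 * freeing or reusing the returned node. This assumes a urcu flavor
 * providing rcu_read_lock(), rcu_read_unlock() and synchronize_rcu();
 * caa_container_of() comes from <urcu/compiler.h>, and "struct myobj"
 * is the hypothetical type from the push example above.
 *
 *	static struct myobj *pop_obj(struct cds_lfs_stack_rcu *s)
 *	{
 *		struct cds_lfs_node_rcu *snode;
 *
 *		rcu_read_lock();
 *		snode = _cds_lfs_pop_rcu(s);
 *		rcu_read_unlock();
 *		if (!snode)
 *			return NULL;
 *		synchronize_rcu();
 *		return caa_container_of(snode, struct myobj, node);
 *	}
 *
 * After synchronize_rcu() returns, no reader can still reference the
 * popped node, so the caller may safely free or modify it. Deferred
 * reclaim with call_rcu() is a cheaper alternative to waiting for a
 * synchronous grace period on every pop.
 */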

#ifdef __cplusplus
}
#endif

#endif /* _URCU_RCULFSTACK_STATIC_H */