// SPDX-FileCopyrightText: 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_RCULFQUEUE_STATIC_H
#define _URCU_RCULFQUEUE_STATIC_H

/*
 * Userspace RCU library - Lock-Free RCU Queue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
 * dynamically with the userspace rcu library.
 */

#include <urcu-call-rcu.h>
#include <urcu/assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>
#include <errno.h>

#ifdef __cplusplus
extern "C" {
#endif

struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;
	struct rcu_head head;
	struct cds_lfq_queue_rcu *q;
};

/*
 * Lock-free RCU queue. Enqueue and dequeue operations hold an RCU read
 * lock to deal with the cmpxchg ABA problem. This queue is *not* circular:
 * head points to the oldest node, tail points to the newest node.
 * A dummy node is kept to ensure enqueue and dequeue can always proceed
 * concurrently. Keeping a separate head and tail helps with large
 * queues: enqueue and dequeue can proceed concurrently without
 * wrestling for exclusive access to the same variables.
 *
 * Dequeue retries if it detects that it would be dequeueing the last node
 * (this means a dummy node dequeue-requeue is in progress). This ensures
 * that there is always at least one node in the queue.
 *
 * In the dequeue operation, we internally reallocate the dummy node
 * upon dequeue/requeue and use call_rcu to free the old one after a
 * grace period.
 */
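
/*
 * Minimal usage sketch of the public wrappers declared in rculfqueue.h
 * (an illustration, not part of this API): "struct myitem" and its
 * handling are assumptions made up for the example. An urcu flavor
 * must be linked, the thread registered, and a call_rcu worker
 * available.
 *
 *	#include <urcu.h>
 *	#include <urcu/rculfqueue.h>
 *
 *	struct myitem {
 *		struct cds_lfq_node_rcu node;
 *		int value;
 *	};
 *
 *	struct cds_lfq_queue_rcu q;
 *
 *	cds_lfq_init_rcu(&q, call_rcu);
 *
 *	struct myitem *item = malloc(sizeof(*item));
 *	cds_lfq_node_init_rcu(&item->node);
 *	item->value = 42;
 *	rcu_read_lock();	(enqueue runs under the RCU read lock)
 *	cds_lfq_enqueue_rcu(&q, &item->node);
 *	rcu_read_unlock();
 */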

static inline
struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
				    struct cds_lfq_node_rcu *next)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = (struct cds_lfq_node_rcu_dummy *)
			malloc(sizeof(struct cds_lfq_node_rcu_dummy));
	urcu_posix_assert(dummy);
	dummy->parent.next = next;
	dummy->parent.dummy = 1;
	dummy->q = q;
	return &dummy->parent;
}

static inline
void free_dummy_cb(struct rcu_head *head)
{
	struct cds_lfq_node_rcu_dummy *dummy =
		caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
	free(dummy);
}

static inline
void rcu_free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	urcu_posix_assert(node->dummy);
	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
	dummy->q->queue_call_rcu(&dummy->head, free_dummy_cb);
}

static inline
void free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	urcu_posix_assert(node->dummy);
	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
	free(dummy);
}

static inline
void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
{
	node->next = NULL;
	node->dummy = 0;
}

static inline
void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
		       void queue_call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	q->tail = make_dummy(q, NULL);
	q->head = q->tail;
	q->queue_call_rcu = queue_call_rcu;
}

/*
 * The queue should be emptied before calling destroy.
 *
 * Returns 0 on success, -EPERM if the queue is not empty.
 */
static inline
int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
{
	struct cds_lfq_node_rcu *head;

	head = rcu_dereference(q->head);
	if (!(head->dummy && head->next == NULL))
		return -EPERM;	/* not empty */
	free_dummy(head);
	return 0;
}
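
/*
 * Drain-then-destroy sketch (illustrative; reuses the hypothetical
 * "struct myitem" from the usage example above). Every node must be
 * dequeued before destroy, which returns -EPERM on a non-empty queue.
 * Waiting for a grace period per node with synchronize_rcu() is the
 * simple, slow variant; batching the frees through call_rcu() is the
 * usual alternative.
 *
 *	for (;;) {
 *		struct cds_lfq_node_rcu *node;
 *
 *		rcu_read_lock();
 *		node = cds_lfq_dequeue_rcu(&q);
 *		rcu_read_unlock();
 *		if (!node)
 *			break;
 *		synchronize_rcu();
 *		free(caa_container_of(node, struct myitem, node));
 *	}
 *	urcu_posix_assert(!cds_lfq_destroy_rcu(&q));
 */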

/*
 * Should be called within an RCU read-side critical section.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
			  struct cds_lfq_node_rcu *node)
{
	/*
	 * The implicit memory barrier of uatomic_cmpxchg() orders earlier
	 * stores to node before publication.
	 */
	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		tail = rcu_dereference(q->tail);
		cmm_emit_legacy_smp_mb();
		next = uatomic_cmpxchg_mo(&tail->next, NULL, node,
					CMM_SEQ_CST, CMM_SEQ_CST);
		if (next == NULL) {
			/*
			 * Tail was at the end of the queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg_mo(&q->tail, tail, node,
						CMM_SEQ_CST, CMM_SEQ_CST);
			return;
		} else {
			/*
			 * Failure to append to current tail.
			 * Help move tail further and retry.
			 */
			(void) uatomic_cmpxchg_mo(&q->tail, tail, next,
						CMM_SEQ_CST, CMM_SEQ_CST);
			continue;
		}
	}
}

static inline
void enqueue_dummy(struct cds_lfq_queue_rcu *q)
{
	struct cds_lfq_node_rcu *node;

	/* We need to reallocate to protect from ABA. */
	node = make_dummy(q, NULL);
	_cds_lfq_enqueue_rcu(q, node);
}

/*
 * Should be called within an RCU read-side critical section.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfq_node_rcu structure.
 * Returns NULL if the queue is empty.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next;

		head = rcu_dereference(q->head);
		next = rcu_dereference(head->next);
		if (head->dummy && next == NULL)
			return NULL;	/* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking if the head next
		 * is NULL, which means we need to enqueue a dummy node
		 * before we can hope to dequeue anything.
		 */
		if (!next) {
			enqueue_dummy(q);
			next = rcu_dereference(head->next);
		}
		if (uatomic_cmpxchg_mo(&q->head, head, next,
					CMM_SEQ_CST, CMM_SEQ_CST) != head)
			continue;	/* Concurrently pushed. */
		if (head->dummy) {
			/* Free dummy after grace period. */
			rcu_free_dummy(head);
			continue;	/* try again */
		}
		return head;
	}
}
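
/*
 * Deferred-free sketch for dequeued nodes (illustrative; assumes the
 * hypothetical "struct myitem" additionally embeds a struct rcu_head).
 * call_rcu() defers the free until a grace period has elapsed, so
 * concurrent readers still traversing the node stay safe.
 *
 *	struct myitem {
 *		struct cds_lfq_node_rcu node;
 *		struct rcu_head rcu_head;
 *	};
 *
 *	static void free_item_cb(struct rcu_head *head)
 *	{
 *		free(caa_container_of(head, struct myitem, rcu_head));
 *	}
 *
 *	rcu_read_lock();
 *	node = cds_lfq_dequeue_rcu(&q);
 *	rcu_read_unlock();
 *	if (node)
 *		call_rcu(&caa_container_of(node, struct myitem, node)->rcu_head,
 *			free_item_cb);
 */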

#ifdef __cplusplus
}
#endif

#endif /* _URCU_RCULFQUEUE_STATIC_H */