rculfqueue: only one dummy node is needed
[urcu.git] / urcu / static / rculfqueue.h
CommitLineData
3d02c34d
MD
1#ifndef _URCU_RCULFQUEUE_STATIC_H
2#define _URCU_RCULFQUEUE_STATIC_H
3
4/*
5 * rculfqueue-static.h
6 *
7 * Userspace RCU library - Lock-Free RCU Queue
8 *
9 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
12 * dynamically with the userspace rcu library.
13 *
14 * This library is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU Lesser General Public
16 * License as published by the Free Software Foundation; either
17 * version 2.1 of the License, or (at your option) any later version.
18 *
19 * This library is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * Lesser General Public License for more details.
23 *
24 * You should have received a copy of the GNU Lesser General Public
25 * License along with this library; if not, write to the Free Software
26 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
#include <assert.h>
#include <errno.h>
#include <stdlib.h>

#include <urcu-call-rcu.h>
#include <urcu/uatomic.h>
3d02c34d
MD
33
34#ifdef __cplusplus
35extern "C" {
36#endif
37
e17d9985
MD
/*
 * Dummy node kept inside the queue at all times so that enqueue and
 * dequeue always have at least one node to operate on. It embeds the
 * regular queue node, an rcu_head for deferred reclaim, and a
 * back-pointer to its owning queue.
 */
struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;	/* embedded queue node; its address, tagged with 0x1, is what the queue stores */
	struct rcu_head head;		/* used by rcu_free_dummy() to defer free() past a grace period */
	struct cds_lfq_queue_rcu *q;	/* owning queue, provides queue_call_rcu */
};
43
/*
 * Lock-free RCU queue. Enqueue and dequeue operations hold a RCU read
 * lock to deal with cmpxchg ABA problem. This queue is *not* circular:
 * head points to the oldest node, tail points to the newest node.
 * A dummy node is kept to ensure enqueue and dequeue can always proceed
 * concurrently. Keeping a separate head and tail helps with large
 * queues: enqueue and dequeue can proceed concurrently without
 * wrestling for exclusive access to the same variables.
 *
 * Dequeue retries if it detects that it would be dequeueing the last node
 * (it means a dummy node dequeue-requeue is in progress). This ensures
 * that there is always at least one node in the queue.
 *
 * In the dequeue operation, we internally reallocate the dummy node
 * upon dequeue/requeue and use call_rcu to free the old one after a
 * grace period.
 */
61
e17d9985
MD
/*
 * Tell whether a queue pointer designates the dummy node. Dummy nodes
 * are tagged by setting the low-order pointer bit (see make_dummy()),
 * which is otherwise always zero for aligned node addresses.
 */
static inline
int is_dummy(struct cds_lfq_node_rcu *node)
{
	unsigned long bits = (unsigned long) node;

	return (bits & 0x1UL) != 0;
}
67
68static inline
69struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
70 struct cds_lfq_node_rcu *next)
71{
72 struct cds_lfq_node_rcu_dummy *dummy;
73
74 dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy));
8f03ed0d 75 assert(dummy);
e17d9985
MD
76 dummy->parent.next = next;
77 dummy->q = q;
78 return (struct cds_lfq_node_rcu *) (((unsigned long) &dummy->parent) | 0x1UL);
79}
80
/*
 * Strip the dummy tag bit from a queue pointer, yielding the real
 * node address. Safe to call on both tagged and untagged pointers.
 */
static inline
struct cds_lfq_node_rcu *get_node(struct cds_lfq_node_rcu *node)
{
	unsigned long addr = (unsigned long) node;

	return (struct cds_lfq_node_rcu *) (addr & ~0x1UL);
}
86
87static inline
88void free_dummy(struct rcu_head *head)
89{
90 struct cds_lfq_node_rcu_dummy *dummy =
91 caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
92 free(dummy);
93}
94
95static inline
96void rcu_free_dummy(struct cds_lfq_node_rcu *node)
97{
98 struct cds_lfq_node_rcu_dummy *dummy;
3d02c34d 99
e17d9985
MD
100 dummy = caa_container_of(get_node(node), struct cds_lfq_node_rcu_dummy,
101 parent);
102 dummy->q->queue_call_rcu(&dummy->head, free_dummy);
103}
104
105static inline
16aa9ee8 106void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
3d02c34d
MD
107{
108 node->next = NULL;
3d02c34d
MD
109}
110
e17d9985 111static inline
d9b52143 112void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
e17d9985
MD
113 void queue_call_rcu(struct rcu_head *head,
114 void (*func)(struct rcu_head *head)))
115{
116 q->tail = make_dummy(q, NULL);
0cca1a2d 117 q->head = q->tail;
e17d9985
MD
118 q->queue_call_rcu = queue_call_rcu;
119}
120
121/*
122 * The queue should be emptied before calling destroy.
123 *
124 * Return 0 on success, -EPERM if queue is not empty.
125 */
126static inline
127int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
3d02c34d 128{
0cca1a2d 129 struct cds_lfq_node_rcu *head;
e17d9985
MD
130
131 head = rcu_dereference(q->head);
0cca1a2d 132 if (!(is_dummy(head) && get_node(head)->next == NULL))
e17d9985
MD
133 return -EPERM; /* not empty */
134 rcu_free_dummy(head);
e17d9985 135 return 0;
3d02c34d
MD
136}
137
d9b52143
MD
/*
 * Append @node at the tail of the queue (Michael-Scott style two-step
 * enqueue: link into tail->next, then swing q->tail).
 *
 * Should be called under rcu read lock critical section.
 *
 * NOTE(review): @node is presumably expected to have gone through
 * _cds_lfq_node_init_rcu() (next == NULL) before enqueue — confirm
 * against callers.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
			  struct cds_lfq_node_rcu *node)
{
	/*
	 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
	 * node before publication.
	 */

	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		/*
		 * tail may be the tagged dummy pointer, hence get_node()
		 * below to reach the actual node before touching ->next.
		 */
		tail = rcu_dereference(q->tail);
		next = uatomic_cmpxchg(&get_node(tail)->next, NULL, node);
		if (next == NULL) {
			/*
			 * Tail was at the end of queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, node);
			return;
		} else {
			/*
			 * Failure to append to current tail.
			 * Help moving tail further and retry.
			 * Result deliberately ignored: if this cmpxchg
			 * fails, someone else already advanced q->tail.
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, next);
			continue;
		}
	}
}
173
/*
 * Dequeue the oldest node by advancing q->head to its successor.
 *
 * Should be called under rcu read lock critical section.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfq_node_rcu structure.
 * Returns NULL if queue is empty.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next;

		/* head may be the tagged dummy; read its successor via get_node(). */
		head = rcu_dereference(q->head);
		next = rcu_dereference(get_node(head)->next);
		if (is_dummy(head) && next == NULL)
			return NULL;	/* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking if the head next
		 * is NULL and retry in that case (this means a
		 * concurrent dummy node re-enqueue is in progress).
		 */
		if (next) {
			/* Winning this cmpxchg makes us the exclusive owner of head. */
			if (uatomic_cmpxchg(&q->head, head, next) == head) {
				if (is_dummy(head)) {
					struct cds_lfq_node_rcu *node;
					/*
					 * Requeue dummy. We need to
					 * reallocate to protect from
					 * ABA.
					 */
					rcu_free_dummy(head);
					node = make_dummy(q, NULL);
					_cds_lfq_enqueue_rcu(q, node);
					continue;	/* try again */
				}
				return head;
			} else {
				/* Concurrently pushed, retry */
				continue;
			}
		} else {
			/* Dummy node re-enqueue is in progress, retry. */
			continue;
		}
	}
}
223
224#ifdef __cplusplus
225}
226#endif
227
228#endif /* _URCU_RCULFQUEUE_STATIC_H */
This page took 0.034291 seconds and 4 git commands to generate.