rcu lfqueue: make dequeue lockless by helping out other dequeuers
[urcu.git] / urcu / static / rculfqueue.h
#ifndef _URCU_RCULFQUEUE_STATIC_H
#define _URCU_RCULFQUEUE_STATIC_H

/*
 * urcu/static/rculfqueue.h
 *
 * Userspace RCU library - Lock-Free RCU Queue
 *
 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
 * dynamically with the userspace rcu library.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu-call-rcu.h>
#include <urcu/uatomic.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif

struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;
	struct rcu_head head;
	struct cds_lfq_queue_rcu *q;
};

/*
 * Lock-free RCU queue. Enqueue and dequeue operations hold an RCU read
 * lock to deal with the cmpxchg ABA problem. This queue is *not*
 * circular: head points to the oldest node, tail points to the newest
 * node. A dummy node is kept to ensure enqueue and dequeue can always
 * proceed concurrently. Keeping separate head and tail pointers helps
 * with large queues: enqueue and dequeue can proceed concurrently
 * without wrestling for exclusive access to the same variables.
 *
 * Dequeue retries if it detects that it would be dequeueing the last
 * node (this means a dummy node dequeue-requeue is in progress). This
 * ensures that there is always at least one node in the queue.
 *
 * In the dequeue operation, we internally reallocate the dummy node
 * upon dequeue/requeue and use call_rcu to free the old one after a
 * grace period.
 */
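
/*
 * Minimal caller-side usage sketch (not part of this header), going
 * through the rculfqueue.h wrappers with the default urcu flavour.
 * Enqueue and dequeue must run under the RCU read lock, and a dequeued
 * node may only be freed after a grace period, hence call_rcu below.
 * "struct myitem", "free_myitem_cb" and "example" are hypothetical
 * names used for illustration:
 *
 *	#include <stdlib.h>
 *	#include <urcu.h>
 *	#include <urcu/rculfqueue.h>
 *
 *	struct myitem {
 *		struct cds_lfq_node_rcu qnode;
 *		struct rcu_head rcu_head;
 *		int value;
 *	};
 *
 *	static void free_myitem_cb(struct rcu_head *head)
 *	{
 *		free(caa_container_of(head, struct myitem, rcu_head));
 *	}
 *
 *	void example(struct cds_lfq_queue_rcu *q)
 *	{
 *		struct cds_lfq_node_rcu *snode;
 *		struct myitem *item;
 *
 *		cds_lfq_init_rcu(q, call_rcu);
 *
 *		item = malloc(sizeof(*item));
 *		cds_lfq_node_init_rcu(&item->qnode);
 *		item->value = 42;
 *		rcu_read_lock();
 *		cds_lfq_enqueue_rcu(q, &item->qnode);
 *		rcu_read_unlock();
 *
 *		rcu_read_lock();
 *		snode = cds_lfq_dequeue_rcu(q);
 *		rcu_read_unlock();
 *		if (snode) {
 *			item = caa_container_of(snode, struct myitem, qnode);
 *			call_rcu(&item->rcu_head, free_myitem_cb);
 *		}
 *	}
 */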
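/*
 * Allocate a new dummy node, with its next pointer set to "next".
 * Allocating a fresh node for each dummy re-enqueue is what protects
 * dequeue's cmpxchg against the ABA problem.
 */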
static inline
struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
				    struct cds_lfq_node_rcu *next)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy));
	assert(dummy);
	dummy->parent.next = next;
	dummy->q = q;
	return &dummy->parent;
}
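/*
 * call_rcu callback: actually free the dummy node, once a grace period
 * has elapsed.
 */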
static inline
void free_dummy_cb(struct rcu_head *head)
{
	struct cds_lfq_node_rcu_dummy *dummy =
		caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
	free(dummy);
}
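/*
 * Defer freeing of the dummy node until after a grace period, using the
 * call_rcu implementation registered at queue initialization.
 */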
static inline
void rcu_free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
	dummy->q->queue_call_rcu(&dummy->head, free_dummy_cb);
}
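/*
 * Immediate free, for dummy nodes that were never published to readers,
 * or when no reader can access the queue anymore (at destroy time).
 */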
static inline
void free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
	free(dummy);
}

static inline
void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
{
	node->next = NULL;
}
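/*
 * Initialize the queue with a single dummy node, so head and tail are
 * never NULL. "queue_call_rcu" is the call_rcu implementation of the
 * RCU flavour used to protect queue accesses (e.g. call_rcu).
 */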
static inline
void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
		       void queue_call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	q->tail = make_dummy(q, NULL);
	q->dummy = q->tail;
	q->head = q->tail;
	q->queue_call_rcu = queue_call_rcu;
}

/*
 * The queue should be emptied before calling destroy.
 *
 * Return 0 on success, -EPERM if queue is not empty.
 */
static inline
int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
{
	struct cds_lfq_node_rcu *head;

	head = rcu_dereference(q->head);
	if (!(head == q->dummy && head->next == NULL))
		return -EPERM;	/* not empty */
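	/*
	 * Queue is empty and, per the destroy contract, no thread
	 * accesses it concurrently anymore: immediate free is safe.
	 */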
	free_dummy(head);
	return 0;
}

/*
 * Should be called within an RCU read-side critical section.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
			  struct cds_lfq_node_rcu *node)
{
	/*
	 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
	 * node before publication.
	 */

	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		tail = rcu_dereference(q->tail);
		next = uatomic_cmpxchg(&tail->next, NULL, node);
		if (next == NULL) {
			/*
			 * Tail was at the end of queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, node);
			return;
		} else {
			/*
			 * Failure to append to current tail.
			 * Help moving tail further and retry.
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, next);
			continue;
		}
	}
}
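/*
 * Re-enqueue a freshly allocated dummy node. Several dequeuers may help
 * concurrently: the cmpxchg on q->dummy ensures that only one of them
 * publishes (and then enqueues) its new dummy; the others free theirs.
 */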
static inline
void reenqueue_dummy(struct cds_lfq_queue_rcu *q)
{
	struct cds_lfq_node_rcu *node;

	/* We need to reallocate to protect from ABA. */
	node = make_dummy(q, NULL);
	if (uatomic_cmpxchg(&q->dummy, NULL, node) != NULL) {
		/* Another dequeuer populated its new dummy. */
		free_dummy(node);
		return;
	}
	_cds_lfq_enqueue_rcu(q, node);
}

/*
 * Should be called within an RCU read-side critical section.
 *
 * The caller must wait for a grace period to pass before freeing the
 * returned node or modifying the cds_lfq_node_rcu structure. Returns
 * NULL if the queue is empty.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next, *dummy;

		head = rcu_dereference(q->head);
		next = rcu_dereference(head->next);
		dummy = rcu_dereference(q->dummy);
		if (head == dummy && next == NULL)
			return NULL;	/* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking if the head next
		 * pointer is NULL, which means a concurrent dummy node
		 * re-enqueue is in progress. We help it reach
		 * completion, and retry.
		 */
		if (!next) {
			/*
			 * Dummy node re-enqueue is in progress. Try to
			 * help.
			 */
			reenqueue_dummy(q);
			continue;	/* try again */
		}
		if (uatomic_cmpxchg(&q->head, head, next) != head)
			continue;	/* Concurrently pushed. */
		if (head == dummy) {
			/* Free old and requeue new dummy. */
			rcu_set_pointer(&q->dummy, NULL);
			rcu_free_dummy(dummy);
			reenqueue_dummy(q);
			continue;	/* try again */
		}
		return head;
	}
}

#ifdef __cplusplus
}
#endif

#endif /* _URCU_RCULFQUEUE_STATIC_H */