rculfqueue: Keep a reference to the current dummy node rather than using low bit
[urcu.git] / urcu / static / rculfqueue.h
1 #ifndef _URCU_RCULFQUEUE_STATIC_H
2 #define _URCU_RCULFQUEUE_STATIC_H
3
4 /*
5 * rculfqueue-static.h
6 *
7 * Userspace RCU library - Lock-Free RCU Queue
8 *
9 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
12 * dynamically with the userspace rcu library.
13 *
14 * This library is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU Lesser General Public
16 * License as published by the Free Software Foundation; either
17 * version 2.1 of the License, or (at your option) any later version.
18 *
19 * This library is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * Lesser General Public License for more details.
23 *
24 * You should have received a copy of the GNU Lesser General Public
25 * License along with this library; if not, write to the Free Software
26 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29 #include <urcu-call-rcu.h>
30 #include <urcu/uatomic.h>
31 #include <assert.h>
32 #include <errno.h>
33
34 #ifdef __cplusplus
35 extern "C" {
36 #endif
37
/*
 * Wrapper around the dummy node. It embeds the queue back-pointer and an
 * rcu_head so the dummy can be reclaimed after a grace period through the
 * queue's registered call_rcu implementation (see rcu_free_dummy()).
 */
struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;	/* linked into the queue like any node */
	struct rcu_head head;		/* for deferred free via queue_call_rcu */
	struct cds_lfq_queue_rcu *q;	/* owning queue (holds queue_call_rcu) */
};
43
44 /*
45 * Lock-free RCU queue. Enqueue and dequeue operations hold a RCU read
46 * lock to deal with cmpxchg ABA problem. This queue is *not* circular:
47 * head points to the oldest node, tail points to the newest node.
48 * A dummy node is kept to ensure enqueue and dequeue can always proceed
49 * concurrently. Keeping a separate head and tail helps with large
50 * queues: enqueue and dequeue can proceed concurrently without
51 * wrestling for exclusive access to the same variables.
52 *
53 * Dequeue retry if it detects that it would be dequeueing the last node
54 * (it means a dummy node dequeue-requeue is in progress). This ensures
55 * that there is always at least one node in the queue.
56 *
57 * In the dequeue operation, we internally reallocate the dummy node
58 * upon dequeue/requeue and use call_rcu to free the old one after a
59 * grace period.
60 */
61
62 static inline
63 int is_dummy(struct cds_lfq_queue_rcu *q, struct cds_lfq_node_rcu *node)
64 {
65 return node == q->dummy;
66 }
67
68 static inline
69 struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
70 struct cds_lfq_node_rcu *next)
71 {
72 struct cds_lfq_node_rcu_dummy *dummy;
73
74 dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy));
75 assert(dummy);
76 dummy->parent.next = next;
77 dummy->q = q;
78 return &dummy->parent;
79 }
80
81 static inline
82 void free_dummy(struct rcu_head *head)
83 {
84 struct cds_lfq_node_rcu_dummy *dummy =
85 caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
86 free(dummy);
87 }
88
/*
 * Schedule deferred reclaim of a dummy node through the queue's
 * registered call_rcu implementation. The node may still be reachable
 * by concurrent RCU readers, hence the grace-period wait before the
 * actual free (performed by free_dummy()).
 */
static inline
void rcu_free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
	dummy->q->queue_call_rcu(&dummy->head, free_dummy);
}
97
/*
 * Initialize a queue node before enqueue: a NULL next marks it as not
 * yet linked into any queue.
 */
static inline
void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
{
	node->next = NULL;
}
103
104 static inline
105 void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
106 void queue_call_rcu(struct rcu_head *head,
107 void (*func)(struct rcu_head *head)))
108 {
109 q->tail = make_dummy(q, NULL);
110 q->dummy = q->tail;
111 q->head = q->tail;
112 q->queue_call_rcu = queue_call_rcu;
113 }
114
115 /*
116 * The queue should be emptied before calling destroy.
117 *
118 * Return 0 on success, -EPERM if queue is not empty.
119 */
120 static inline
121 int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
122 {
123 struct cds_lfq_node_rcu *head;
124
125 head = rcu_dereference(q->head);
126 if (!(is_dummy(q, head) && head->next == NULL))
127 return -EPERM; /* not empty */
128 rcu_free_dummy(head);
129 return 0;
130 }
131
/*
 * Enqueue @node at the tail of queue @q.
 *
 * Should be called under rcu read lock critical section.
 *
 * This is the classic two-step lock-free enqueue: (1) CAS the current
 * tail's next pointer from NULL to the new node, then (2) swing q->tail
 * forward. Step (2) may be completed by a helping concurrent enqueuer,
 * so its failure is benign and ignored.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
		struct cds_lfq_node_rcu *node)
{
	/*
	 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
	 * node before publication.
	 */

	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		tail = rcu_dereference(q->tail);
		next = uatomic_cmpxchg(&tail->next, NULL, node);
		if (next == NULL) {
			/*
			 * Tail was at the end of queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, node);
			return;
		} else {
			/*
			 * Failure to append to current tail.
			 * Help moving tail further and retry.
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, next);
			continue;
		}
	}
}
167
/*
 * Dequeue the oldest node from queue @q.
 *
 * Should be called under rcu read lock critical section.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfq_node_rcu structure.
 * Returns NULL if queue is empty.
 *
 * When the dequeued node turns out to be the dummy, it is replaced by a
 * freshly allocated dummy (reallocation protects against cmpxchg ABA on
 * q->head), the old one is handed to call_rcu for deferred reclaim, and
 * the dequeue is retried.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next;

		head = rcu_dereference(q->head);
		next = rcu_dereference(head->next);
		if (is_dummy(q, head) && next == NULL)
			return NULL;	/* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking if the head next
		 * is NULL and retry in that case (this means a
		 * concurrent dummy node re-enqueue is in progress).
		 */
		if (next) {
			if (uatomic_cmpxchg(&q->head, head, next) == head) {
				if (is_dummy(q, head)) {
					struct cds_lfq_node_rcu *node;
					/*
					 * Requeue dummy. We need to
					 * reallocate to protect from
					 * ABA.
					 */
					rcu_free_dummy(head);
					node = make_dummy(q, NULL);
					/*
					 * We are the only thread
					 * allowed to update dummy (we
					 * own the old dummy).
					 */
					q->dummy = node;
					_cds_lfq_enqueue_rcu(q, node);
					continue;	/* try again */
				}
				return head;
			} else {
				/* Concurrently pushed, retry */
				continue;
			}
		} else {
			/* Dummy node re-enqueue is in progress, retry. */
			continue;
		}
	}
}
223
224 #ifdef __cplusplus
225 }
226 #endif
227
228 #endif /* _URCU_RCULFQUEUE_STATIC_H */
This page took 0.034282 seconds and 5 git commands to generate.