RCU lf queue: Add assert for memory allocation
[urcu.git] / urcu/static/rculfqueue.h

#ifndef _URCU_RCULFQUEUE_STATIC_H
#define _URCU_RCULFQUEUE_STATIC_H

/*
 * urcu/static/rculfqueue.h
 *
 * Userspace RCU library - Lock-Free RCU Queue
 *
 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
 * dynamically with the userspace rcu library.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu-call-rcu.h>
#include <urcu/uatomic.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>	/* malloc()/free(), used by the dummy node helpers */
/* A urcu implementation header should already be included. */

#ifdef __cplusplus
extern "C" {
#endif

struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;
	struct rcu_head head;
	struct cds_lfq_queue_rcu *q;
};

/*
 * Lock-free RCU queue. Enqueue and dequeue operations hold an RCU read
 * lock to deal with the cmpxchg ABA problem. This queue is *not*
 * circular: head points to the oldest node, tail points to the newest
 * node. Dummy nodes are kept to ensure enqueue and dequeue can always
 * proceed concurrently. Keeping a separate head and tail helps with
 * large queues: enqueue and dequeue can proceed concurrently without
 * wrestling for exclusive access to the same variables.
 *
 * We keep two dummy nodes in the queue to distinguish between the empty
 * queue state and the intermediate state while a dummy node
 * dequeue/requeue is being performed. Dequeue retries if it detects
 * that it would be dequeueing the last node (which means a dummy node
 * dequeue-requeue is in progress). This ensures that there is always at
 * least one node in the queue. In a situation where the two dummy nodes
 * are being requeued (they therefore don't appear in the queue at a
 * given moment), we are certain that there is at least one non-dummy
 * node in the queue (ensured by the test for a NULL next node upon
 * dequeue).
 *
 * In the dequeue operation, we internally reallocate the dummy nodes
 * upon dequeue/requeue and use call_rcu to free them after a grace
 * period.
 */

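/*
 * Dummy nodes are distinguished from user-supplied nodes by setting the
 * lowest bit of the node pointer, which is otherwise always zero given
 * the alignment of struct cds_lfq_node_rcu_dummy.
 */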
static inline
int is_dummy(struct cds_lfq_node_rcu *node)
{
	return ((unsigned long) node) & 0x1UL;
}

static inline
struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
				    struct cds_lfq_node_rcu *next)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy));
	assert(dummy);
	dummy->parent.next = next;
	dummy->q = q;
	return (struct cds_lfq_node_rcu *) (((unsigned long) &dummy->parent) | 0x1UL);
}

static inline
struct cds_lfq_node_rcu *get_node(struct cds_lfq_node_rcu *node)
{
	return (struct cds_lfq_node_rcu *) (((unsigned long) node) & ~0x1UL);
}

static inline
void free_dummy(struct rcu_head *head)
{
	struct cds_lfq_node_rcu_dummy *dummy =
		caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);

	free(dummy);
}

static inline
void rcu_free_dummy(struct cds_lfq_node_rcu *node)
{
	struct cds_lfq_node_rcu_dummy *dummy;

	dummy = caa_container_of(get_node(node), struct cds_lfq_node_rcu_dummy,
				 parent);
	dummy->q->queue_call_rcu(&dummy->head, free_dummy);
}

static inline
void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
{
	node->next = NULL;
}

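/*
 * queue_call_rcu is typically the call_rcu() function of the urcu
 * flavor used by the application (an assumption; any function with this
 * signature that defers invocation of func until a grace period has
 * elapsed is suitable).
 */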
static inline
void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
		       void queue_call_rcu(struct rcu_head *head,
					   void (*func)(struct rcu_head *head)))
{
	q->tail = make_dummy(q, NULL);
	q->head = make_dummy(q, q->tail);
	q->queue_call_rcu = queue_call_rcu;
}

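/*
 * Initialization usage sketch, assuming the application uses the public
 * cds_lfq_* wrappers from rculfqueue.h and the default urcu flavor's
 * call_rcu(); "myqueue" is a hypothetical variable name:
 *
 *	struct cds_lfq_queue_rcu myqueue;
 *
 *	cds_lfq_init_rcu(&myqueue, call_rcu);
 */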
/*
 * The queue should be emptied before calling destroy.
 *
 * Returns 0 on success, -EPERM if the queue is not empty.
 */
static inline
int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
{
	struct cds_lfq_node_rcu *head, *next;

	head = rcu_dereference(q->head);
	next = rcu_dereference(get_node(head)->next);
	if (!(is_dummy(head) && is_dummy(next) && get_node(next)->next == NULL))
		return -EPERM;	/* not empty */
	rcu_free_dummy(head);
	rcu_free_dummy(next);
	return 0;
}

/*
 * Should be called within an RCU read-side critical section.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
			  struct cds_lfq_node_rcu *node)
{
	/*
	 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
	 * node before publication.
	 */

	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		tail = rcu_dereference(q->tail);
		next = uatomic_cmpxchg(&get_node(tail)->next, NULL, node);
		if (next == NULL) {
			/*
			 * Tail was at the end of the queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, node);
			return;
		} else {
			/*
			 * Failure to append to current tail.
			 * Help move tail further and retry.
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, next);
			continue;
		}
	}
}

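/*
 * Enqueue usage sketch; struct myitem, its fields, and "myqueue" (from
 * the initialization sketch above) are hypothetical. The node is
 * embedded in the user structure, and the enqueue is done within a
 * read-side critical section:
 *
 *	struct myitem {
 *		struct cds_lfq_node_rcu node;
 *		struct rcu_head rcu_head;
 *		int value;
 *	};
 *
 *	struct myitem *item;
 *
 *	item = malloc(sizeof(*item));
 *	assert(item);
 *	cds_lfq_node_init_rcu(&item->node);
 *	item->value = 42;
 *	rcu_read_lock();
 *	cds_lfq_enqueue_rcu(&myqueue, &item->node);
 *	rcu_read_unlock();
 */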
/*
 * Should be called within an RCU read-side critical section.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfq_node_rcu structure.
 * Returns NULL if the queue is empty.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next;

		head = rcu_dereference(q->head);
		next = rcu_dereference(get_node(head)->next);
		if (is_dummy(head) && is_dummy(next) && get_node(next)->next == NULL)
			return NULL;	/* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking whether the head's
		 * next pointer is NULL and retrying in that case (it
		 * means a concurrent dummy node re-enqueue is in
		 * progress).
		 */
		if (next) {
			if (uatomic_cmpxchg(&q->head, head, next) == head) {
				if (is_dummy(head)) {
					struct cds_lfq_node_rcu *node;

					/*
					 * Requeue dummy. We need to
					 * reallocate to protect from
					 * ABA.
					 */
					rcu_free_dummy(head);
					node = make_dummy(q, NULL);
					_cds_lfq_enqueue_rcu(q, node);
					continue;	/* try again */
				}
				return head;
			} else {
				/* Concurrently pushed, retry */
				continue;
			}
		} else {
			/* Dummy node re-enqueue is in progress, retry. */
			continue;
		}
	}
}

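/*
 * Dequeue usage sketch, continuing the hypothetical struct myitem from
 * the enqueue sketch above; free_myitem_rcu is a hypothetical call_rcu
 * callback that frees the enclosing structure. The returned node may
 * still be referenced by concurrent readers, hence the deferred free:
 *
 *	static
 *	void free_myitem_rcu(struct rcu_head *head)
 *	{
 *		struct myitem *item =
 *			caa_container_of(head, struct myitem, rcu_head);
 *
 *		free(item);
 *	}
 *
 *	struct cds_lfq_node_rcu *qnode;
 *
 *	rcu_read_lock();
 *	qnode = cds_lfq_dequeue_rcu(&myqueue);
 *	rcu_read_unlock();
 *	if (qnode) {
 *		struct myitem *item =
 *			caa_container_of(qnode, struct myitem, node);
 *
 *		call_rcu(&item->rcu_head, free_myitem_rcu);
 *	}
 */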
#ifdef __cplusplus
}
#endif

#endif /* _URCU_RCULFQUEUE_STATIC_H */