Fix: handle sys_futex() FUTEX_WAIT interrupted by signal
[urcu.git] / urcu / static / rculfqueue.h
1 #ifndef _URCU_RCULFQUEUE_STATIC_H
2 #define _URCU_RCULFQUEUE_STATIC_H
3
4 /*
5 * rculfqueue-static.h
6 *
7 * Userspace RCU library - Lock-Free RCU Queue
8 *
9 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
12 * dynamically with the userspace rcu library.
13 *
14 * This library is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU Lesser General Public
16 * License as published by the Free Software Foundation; either
17 * version 2.1 of the License, or (at your option) any later version.
18 *
19 * This library is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * Lesser General Public License for more details.
23 *
24 * You should have received a copy of the GNU Lesser General Public
25 * License along with this library; if not, write to the Free Software
26 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
#include <urcu-call-rcu.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
34
35 #ifdef __cplusplus
36 extern "C" {
37 #endif
38
/*
 * Internal wrapper for the queue's dummy node.  Embeds the public
 * node (parent) plus the bookkeeping needed to reclaim the dummy
 * through call_rcu after it is unlinked.
 */
struct cds_lfq_node_rcu_dummy {
	struct cds_lfq_node_rcu parent;	/* embedded queue node (dummy flag set) */
	struct rcu_head head;		/* for deferred reclaim via call_rcu */
	struct cds_lfq_queue_rcu *q;	/* back-pointer to owning queue */
};
44
45 /*
46 * Lock-free RCU queue. Enqueue and dequeue operations hold a RCU read
47 * lock to deal with cmpxchg ABA problem. This queue is *not* circular:
48 * head points to the oldest node, tail points to the newest node.
49 * A dummy node is kept to ensure enqueue and dequeue can always proceed
50 * concurrently. Keeping a separate head and tail helps with large
51 * queues: enqueue and dequeue can proceed concurrently without
52 * wrestling for exclusive access to the same variables.
53 *
 * Dequeue retries if it detects that it would be dequeueing the last node
 * (that means a dummy node dequeue-requeue is in progress). This ensures
 * that there is always at least one node in the queue.
57 *
58 * In the dequeue operation, we internally reallocate the dummy node
59 * upon dequeue/requeue and use call_rcu to free the old one after a
60 * grace period.
61 */
62
63 static inline
64 struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
65 struct cds_lfq_node_rcu *next)
66 {
67 struct cds_lfq_node_rcu_dummy *dummy;
68
69 dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy));
70 assert(dummy);
71 dummy->parent.next = next;
72 dummy->parent.dummy = 1;
73 dummy->q = q;
74 return &dummy->parent;
75 }
76
77 static inline
78 void free_dummy_cb(struct rcu_head *head)
79 {
80 struct cds_lfq_node_rcu_dummy *dummy =
81 caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
82 free(dummy);
83 }
84
85 static inline
86 void rcu_free_dummy(struct cds_lfq_node_rcu *node)
87 {
88 struct cds_lfq_node_rcu_dummy *dummy;
89
90 assert(node->dummy);
91 dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
92 dummy->q->queue_call_rcu(&dummy->head, free_dummy_cb);
93 }
94
95 static inline
96 void free_dummy(struct cds_lfq_node_rcu *node)
97 {
98 struct cds_lfq_node_rcu_dummy *dummy;
99
100 assert(node->dummy);
101 dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
102 free(dummy);
103 }
104
105 static inline
106 void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
107 {
108 node->next = NULL;
109 node->dummy = 0;
110 }
111
112 static inline
113 void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
114 void queue_call_rcu(struct rcu_head *head,
115 void (*func)(struct rcu_head *head)))
116 {
117 q->tail = make_dummy(q, NULL);
118 q->head = q->tail;
119 q->queue_call_rcu = queue_call_rcu;
120 }
121
122 /*
123 * The queue should be emptied before calling destroy.
124 *
125 * Return 0 on success, -EPERM if queue is not empty.
126 */
127 static inline
128 int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
129 {
130 struct cds_lfq_node_rcu *head;
131
132 head = rcu_dereference(q->head);
133 if (!(head->dummy && head->next == NULL))
134 return -EPERM; /* not empty */
135 free_dummy(head);
136 return 0;
137 }
138
139 /*
140 * Should be called under rcu read lock critical section.
141 */
/*
 * Append @node at the tail of the queue.
 *
 * Should be called under rcu read lock critical section.
 * The node must have been initialized with _cds_lfq_node_init_rcu()
 * and must not already be in a queue.
 */
static inline
void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
			  struct cds_lfq_node_rcu *node)
{
	/*
	 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
	 * node before publication.
	 */

	for (;;) {
		struct cds_lfq_node_rcu *tail, *next;

		tail = rcu_dereference(q->tail);
		/* Atomically link node after tail iff tail is still last. */
		next = uatomic_cmpxchg(&tail->next, NULL, node);
		if (next == NULL) {
			/*
			 * Tail was at the end of queue, we successfully
			 * appended to it. Now move tail (another
			 * enqueue might beat us to it, that's fine).
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, node);
			return;
		} else {
			/*
			 * Failure to append to current tail.
			 * Help moving tail further and retry.
			 */
			(void) uatomic_cmpxchg(&q->tail, tail, next);
			continue;
		}
	}
}
174
175 static inline
176 void enqueue_dummy(struct cds_lfq_queue_rcu *q)
177 {
178 struct cds_lfq_node_rcu *node;
179
180 /* We need to reallocate to protect from ABA. */
181 node = make_dummy(q, NULL);
182 _cds_lfq_enqueue_rcu(q, node);
183 }
184
185 /*
186 * Should be called under rcu read lock critical section.
187 *
188 * The caller must wait for a grace period to pass before freeing the returned
189 * node or modifying the cds_lfq_node_rcu structure.
190 * Returns NULL if queue is empty.
191 */
/*
 * Remove and return the oldest user node, or NULL if the queue is
 * empty.
 *
 * Should be called under rcu read lock critical section.
 *
 * The caller must wait for a grace period to pass before freeing the returned
 * node or modifying the cds_lfq_node_rcu structure.
 * Returns NULL if queue is empty.
 */
static inline
struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
	for (;;) {
		struct cds_lfq_node_rcu *head, *next;

		head = rcu_dereference(q->head);
		next = rcu_dereference(head->next);
		/* Only the dummy with no successor means truly empty. */
		if (head->dummy && next == NULL)
			return NULL; /* empty */
		/*
		 * We never, ever allow dequeue to get to a state where
		 * the queue is empty (we need at least one node in the
		 * queue). This is ensured by checking if the head next
		 * is NULL, which means we need to enqueue a dummy node
		 * before we can hope dequeuing anything.
		 */
		if (!next) {
			enqueue_dummy(q);
			next = rcu_dereference(head->next);
		}
		/* Swing head to next; lose the race means someone else dequeued. */
		if (uatomic_cmpxchg(&q->head, head, next) != head)
			continue; /* Concurrently pushed. */
		if (head->dummy) {
			/* Free dummy after grace period. */
			rcu_free_dummy(head);
			continue; /* try again */
		}
		return head;
	}
}
223
224 #ifdef __cplusplus
225 }
226 #endif
227
228 #endif /* _URCU_RCULFQUEUE_STATIC_H */
This page took 0.033167 seconds and 4 git commands to generate.