1 #ifndef _URCU_RCULFQUEUE_STATIC_H
2 #define _URCU_RCULFQUEUE_STATIC_H
/*
 * Userspace RCU library - Lock-Free RCU Queue
 *
 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
 * dynamically with the userspace rcu library.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <errno.h>
#include <stdlib.h>

#include <urcu-call-rcu.h>
#include <urcu/assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>
39 struct cds_lfq_node_rcu_dummy
{
40 struct cds_lfq_node_rcu parent
;
42 struct cds_lfq_queue_rcu
*q
;
/*
 * Lock-free RCU queue. Enqueue and dequeue operations hold a RCU read
 * lock to deal with cmpxchg ABA problem. This queue is *not* circular:
 * head points to the oldest node, tail points to the newest node.
 * A dummy node is kept to ensure enqueue and dequeue can always proceed
 * concurrently. Keeping a separate head and tail helps with large
 * queues: enqueue and dequeue can proceed concurrently without
 * wrestling for exclusive access to the same variables.
 *
 * Dequeue retry if it detects that it would be dequeueing the last node
 * (it means a dummy node dequeue-requeue is in progress). This ensures
 * that there is always at least one node in the queue.
 *
 * In the dequeue operation, we internally reallocate the dummy node
 * upon dequeue/requeue and use call_rcu to free the old one after a
 * grace period.
 */
64 struct cds_lfq_node_rcu
*make_dummy(struct cds_lfq_queue_rcu
*q
,
65 struct cds_lfq_node_rcu
*next
)
67 struct cds_lfq_node_rcu_dummy
*dummy
;
69 dummy
= (struct cds_lfq_node_rcu_dummy
*)
70 malloc(sizeof(struct cds_lfq_node_rcu_dummy
));
71 urcu_posix_assert(dummy
);
72 dummy
->parent
.next
= next
;
73 dummy
->parent
.dummy
= 1;
75 return &dummy
->parent
;
79 void free_dummy_cb(struct rcu_head
*head
)
81 struct cds_lfq_node_rcu_dummy
*dummy
=
82 caa_container_of(head
, struct cds_lfq_node_rcu_dummy
, head
);
87 void rcu_free_dummy(struct cds_lfq_node_rcu
*node
)
89 struct cds_lfq_node_rcu_dummy
*dummy
;
91 urcu_posix_assert(node
->dummy
);
92 dummy
= caa_container_of(node
, struct cds_lfq_node_rcu_dummy
, parent
);
93 dummy
->q
->queue_call_rcu(&dummy
->head
, free_dummy_cb
);
97 void free_dummy(struct cds_lfq_node_rcu
*node
)
99 struct cds_lfq_node_rcu_dummy
*dummy
;
101 urcu_posix_assert(node
->dummy
);
102 dummy
= caa_container_of(node
, struct cds_lfq_node_rcu_dummy
, parent
);
107 void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu
*node
)
114 void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu
*q
,
115 void queue_call_rcu(struct rcu_head
*head
,
116 void (*func
)(struct rcu_head
*head
)))
118 q
->tail
= make_dummy(q
, NULL
);
120 q
->queue_call_rcu
= queue_call_rcu
;
124 * The queue should be emptied before calling destroy.
126 * Return 0 on success, -EPERM if queue is not empty.
129 int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu
*q
)
131 struct cds_lfq_node_rcu
*head
;
133 head
= rcu_dereference(q
->head
);
134 if (!(head
->dummy
&& head
->next
== NULL
))
135 return -EPERM
; /* not empty */
141 * Should be called under rcu read lock critical section.
144 void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu
*q
,
145 struct cds_lfq_node_rcu
*node
)
148 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
149 * node before publication.
153 struct cds_lfq_node_rcu
*tail
, *next
;
155 tail
= rcu_dereference(q
->tail
);
156 next
= uatomic_cmpxchg(&tail
->next
, NULL
, node
);
159 * Tail was at the end of queue, we successfully
160 * appended to it. Now move tail (another
161 * enqueue might beat us to it, that's fine).
163 (void) uatomic_cmpxchg(&q
->tail
, tail
, node
);
167 * Failure to append to current tail.
168 * Help moving tail further and retry.
170 (void) uatomic_cmpxchg(&q
->tail
, tail
, next
);
177 void enqueue_dummy(struct cds_lfq_queue_rcu
*q
)
179 struct cds_lfq_node_rcu
*node
;
181 /* We need to reallocate to protect from ABA. */
182 node
= make_dummy(q
, NULL
);
183 _cds_lfq_enqueue_rcu(q
, node
);
187 * Should be called under rcu read lock critical section.
189 * The caller must wait for a grace period to pass before freeing the returned
190 * node or modifying the cds_lfq_node_rcu structure.
191 * Returns NULL if queue is empty.
194 struct cds_lfq_node_rcu
*_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu
*q
)
197 struct cds_lfq_node_rcu
*head
, *next
;
199 head
= rcu_dereference(q
->head
);
200 next
= rcu_dereference(head
->next
);
201 if (head
->dummy
&& next
== NULL
)
202 return NULL
; /* empty */
204 * We never, ever allow dequeue to get to a state where
205 * the queue is empty (we need at least one node in the
206 * queue). This is ensured by checking if the head next
207 * is NULL, which means we need to enqueue a dummy node
208 * before we can hope dequeuing anything.
212 next
= rcu_dereference(head
->next
);
214 if (uatomic_cmpxchg(&q
->head
, head
, next
) != head
)
215 continue; /* Concurrently pushed. */
217 /* Free dummy after grace period. */
218 rcu_free_dummy(head
);
219 continue; /* try again */
229 #endif /* _URCU_RCULFQUEUE_STATIC_H */