RCU lock-free queue comment updates
[urcu.git] urcu/rculfqueue.h
#ifndef _URCU_RCULFQUEUE_H
#define _URCU_RCULFQUEUE_H

/*
 * rculfqueue.h
 *
 * Userspace RCU library - Lock-Free RCU Queue
 *
 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/urcu_ref.h>
#include <assert.h>

#if (!defined(_GNU_SOURCE) && !defined(_LGPL_SOURCE))
#error "Dynamic loader LGPL wrappers not implemented yet"
#endif
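
/*
 * The RCU read-side primitives (rcu_read_lock(), rcu_read_unlock(),
 * rcu_dereference()) and uatomic_cmpxchg() used below are expected to be
 * provided by the urcu headers already included by the caller.
 */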

/*
 * Lock-free RCU queue using reference counting.  Enqueue and dequeue
 * operations hold an RCU read lock to deal with the cmpxchg ABA problem.
 * This implementation keeps a dummy head node to ensure the queue can
 * always be updated locklessly.  Given that this is a queue, the dummy
 * head node must always advance as entries are dequeued.  Therefore, a
 * reference count is kept on each dequeued entry: it survives as the dummy
 * head node until the next dequeue, at which point its reference count is
 * decremented.
 */

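/*
 * Reference count large enough that urcu_ref_put() can never bring the
 * queue's embedded dummy init node down to zero, so the init node is
 * never handed to a release callback.
 */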
#define URCU_LFQ_PERMANENT_REF	128

struct rcu_lfq_node {
	struct rcu_lfq_node *next;
	struct urcu_ref ref;
};

struct rcu_lfq_queue {
	struct rcu_lfq_node *head, *tail;
	struct rcu_lfq_node init;	/* Dummy initialization node */
};

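/*
 * Invariant: the queue always contains at least one node, the current
 * dummy head.  tail may transiently point one node behind the last
 * enqueued node; enqueuers help move it forward before retrying.
 */
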
static inline void rcu_lfq_node_init(struct rcu_lfq_node *node)
{
	node->next = NULL;
	urcu_ref_init(&node->ref);
}

static inline void rcu_lfq_init(struct rcu_lfq_queue *q)
{
	rcu_lfq_node_init(&q->init);
	/* Make sure the initial node is never freed. */
	urcu_ref_set(&q->init.ref, URCU_LFQ_PERMANENT_REF);
	q->head = q->tail = &q->init;
}

static inline void rcu_lfq_enqueue(struct rcu_lfq_queue *q,
				   struct rcu_lfq_node *node)
{
	urcu_ref_get(&node->ref);

	/*
	 * uatomic_cmpxchg() implies a full memory barrier, which orders the
	 * earlier stores to node before its publication.
	 */

	for (;;) {
		struct rcu_lfq_node *tail, *next;

		rcu_read_lock();
		tail = rcu_dereference(q->tail);
		/*
		 * Typically expect tail->next to be NULL.
		 */
		next = uatomic_cmpxchg(&tail->next, NULL, node);
		if (next == NULL) {
			/*
			 * Tail was at the end of the queue, we successfully
			 * appended to it.  Now move tail (another enqueue
			 * might beat us to it, that's fine).
			 */
			uatomic_cmpxchg(&q->tail, tail, node);
			rcu_read_unlock();
			return;
		} else {
			/*
			 * Failed to append to the current tail.  Help move
			 * tail further along, then retry.
			 */
			uatomic_cmpxchg(&q->tail, tail, next);
			rcu_read_unlock();
			continue;
		}
	}
}

/*
 * The entry returned by dequeue must be handed to urcu_ref_put(), which
 * invokes the release primitive once the reference count drops to zero.
 * The release primitive must wait for a grace period before performing the
 * actual memory reclamation.
 * The lfq node returned by dequeue must not be modified, re-used or freed
 * until its reference count reaches zero and a grace period has elapsed
 * after that.
 */
static inline struct rcu_lfq_node *
rcu_lfq_dequeue(struct rcu_lfq_queue *q, void (*release)(struct urcu_ref *))
{
	for (;;) {
		struct rcu_lfq_node *head, *next;

		rcu_read_lock();
		head = rcu_dereference(q->head);
		next = rcu_dereference(head->next);
		if (next) {
			if (uatomic_cmpxchg(&q->head, head, next) == head) {
				rcu_read_unlock();
				/*
				 * Release the queue's reference on the old
				 * dummy head node, now replaced by next.
				 */
				urcu_ref_put(&head->ref, release);
				return next;
			} else {
				/* Concurrently pushed, retry. */
				rcu_read_unlock();
				continue;
			}
		} else {
			/* Queue is empty. */
			rcu_read_unlock();
			return NULL;
		}
	}
}

#endif /* _URCU_RCULFQUEUE_H */
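
A minimal usage sketch of this queue, not part of the header itself: it assumes an
LGPL-compatible urcu flavor header (providing synchronize_rcu() and the read-side
primitives) has been included with _LGPL_SOURCE defined, and that the calling thread
is a registered RCU reader. The names struct myitem, item_of_ref and myitem_release
are hypothetical, introduced here only for illustration. Per the dequeue comment
above, the caller drops its reference with urcu_ref_put(), and the release callback
waits for a grace period before reclaiming memory.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical user structure.  The queue node is the first member, so a
 * struct rcu_lfq_node pointer can be cast back to the enclosing item.
 */
struct myitem {
	struct rcu_lfq_node node;
	int value;
};

/* Map the embedded urcu_ref back to the enclosing item. */
#define item_of_ref(r) \
	((struct myitem *) ((char *) (r) - offsetof(struct myitem, node.ref)))

/*
 * Release callback, invoked once the reference count drops to zero.  It
 * runs outside any RCU read-side critical section, so it may wait for a
 * grace period before freeing, as the dequeue comment requires.
 */
static void myitem_release(struct urcu_ref *ref)
{
	struct myitem *item = item_of_ref(ref);

	synchronize_rcu();
	free(item);
}

static void example(struct rcu_lfq_queue *q)
{
	struct myitem *item = malloc(sizeof(*item));
	struct rcu_lfq_node *node;

	if (!item)
		return;
	rcu_lfq_node_init(&item->node);
	item->value = 42;
	rcu_lfq_enqueue(q, &item->node);

	node = rcu_lfq_dequeue(q, myitem_release);
	if (node) {
		printf("dequeued %d\n", ((struct myitem *) node)->value);
		/*
		 * Drop the caller's reference.  The item is actually freed
		 * later, once a subsequent dequeue also releases it from
		 * dummy head duty.
		 */
		urcu_ref_put(&node->ref, myitem_release);
	}
}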