#ifndef _URCU_RCULFQUEUE_H
#define _URCU_RCULFQUEUE_H

/*
 * rculfqueue.h
 *
 * Userspace RCU library - Lock-Free RCU Queue
 *
 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/urcu_ref.h>
#include <assert.h>

#ifdef __cplusplus
extern "C" {
#endif

#if (!defined(_GNU_SOURCE) && !defined(_LGPL_SOURCE))
#error "Dynamic loader LGPL wrappers not implemented yet"
#endif

/*
 * Lock-free RCU queue using reference counting. Enqueue and dequeue operations
 * hold an RCU read lock to deal with the cmpxchg ABA problem. This
 * implementation keeps a dummy head node to ensure the queue can always be
 * updated locklessly. Given that this is a queue, the dummy head node must
 * always advance as entries are dequeued. Therefore, a reference count is kept
 * on each dequeued entry, so it can remain in place as the dummy head node
 * until the next dequeue, at which point its reference count is decremented.
 * A usage sketch follows the structure definitions below.
 */

#define URCU_LFQ_PERMANENT_REF		128

struct rcu_lfq_node {
        struct rcu_lfq_node *next;
        struct urcu_ref ref;
};

struct rcu_lfq_queue {
        struct rcu_lfq_node *head, *tail;
        struct rcu_lfq_node init;       /* Dummy initialization node */
};
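
/*
 * Usage note (illustrative sketch, not part of this API): user code typically
 * embeds struct rcu_lfq_node within its own structure. "struct myobj" below is
 * a hypothetical name. Keeping the node as the first member lets a plain cast
 * recover the enclosing object from the pointer returned by rcu_lfq_dequeue().
 *
 *	struct myobj {
 *		struct rcu_lfq_node node;	(kept as first member on purpose)
 *		int data;
 *	};
 */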

void rcu_lfq_node_init(struct rcu_lfq_node *node)
{
        node->next = NULL;
        urcu_ref_init(&node->ref);
}

void rcu_lfq_init(struct rcu_lfq_queue *q)
{
        rcu_lfq_node_init(&q->init);
        /* Make sure the initial node is never freed. */
        urcu_ref_set(&q->init.ref, URCU_LFQ_PERMANENT_REF);
        q->head = q->tail = &q->init;
}

void rcu_lfq_enqueue(struct rcu_lfq_queue *q, struct rcu_lfq_node *node)
{
        urcu_ref_get(&node->ref);

        /*
         * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
         * node before publication.
         */

        for (;;) {
                struct rcu_lfq_node *tail, *next;

                rcu_read_lock();
                tail = rcu_dereference(q->tail);
                /*
                 * Typically expect tail->next to be NULL.
                 */
                next = uatomic_cmpxchg(&tail->next, NULL, node);
                if (next == NULL) {
                        /*
                         * Tail was at the end of queue, we successfully
                         * appended to it.
                         * Now move tail (another enqueue might beat
                         * us to it, that's fine).
                         */
                        uatomic_cmpxchg(&q->tail, tail, node);
                        rcu_read_unlock();
                        return;
                } else {
                        /*
                         * Failure to append to current tail. Help moving tail
                         * further and retry.
                         */
                        uatomic_cmpxchg(&q->tail, tail, next);
                        rcu_read_unlock();
                        continue;
                }
        }
}
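
/*
 * Enqueue usage sketch (illustrative only, building on the hypothetical
 * struct myobj above). The calling thread is assumed to be registered as an
 * RCU reader (e.g. with rcu_register_thread()); rcu_lfq_enqueue() takes the
 * RCU read lock internally.
 *
 *	static struct rcu_lfq_queue q;
 *
 *	rcu_lfq_init(&q);
 *
 *	struct myobj *obj = malloc(sizeof(*obj));
 *
 *	rcu_lfq_node_init(&obj->node);
 *	obj->data = 42;
 *	rcu_lfq_enqueue(&q, &obj->node);
 */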

/*
 * The entry returned by dequeue must be taken care of by doing a urcu_ref_put,
 * which calls the release primitive when the reference count drops to zero. A
 * grace period must elapse after execution of the release callback before the
 * actual memory reclamation is performed or the rcu_lfq_node structure is
 * modified.
 * In other words, the lfq node returned by dequeue must not be modified,
 * re-used or freed until its reference count reaches zero and a grace period
 * has elapsed (after the refcount reached 0).
 * A usage sketch follows rcu_lfq_dequeue() below.
 */
struct rcu_lfq_node *
rcu_lfq_dequeue(struct rcu_lfq_queue *q, void (*release)(struct urcu_ref *))
{
        for (;;) {
                struct rcu_lfq_node *head, *next;

                rcu_read_lock();
                head = rcu_dereference(q->head);
                next = rcu_dereference(head->next);
                if (next) {
                        if (uatomic_cmpxchg(&q->head, head, next) == head) {
                                rcu_read_unlock();
                                urcu_ref_put(&head->ref, release);
                                return next;
                        } else {
                                /* Concurrently pushed, retry */
                                rcu_read_unlock();
                                continue;
                        }
                } else {
                        /* Empty */
                        rcu_read_unlock();
                        return NULL;
                }
        }
}
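
/*
 * Dequeue/release usage sketch (illustrative only). It extends the
 * hypothetical struct myobj above with a struct rcu_head, and assumes the
 * urcu call_rcu() deferred-reclamation API is available; "myobj_free" and
 * "myobj_release" are hypothetical helpers, not part of rculfqueue.
 * offsetof() comes from <stddef.h>.
 *
 *	struct myobj {
 *		struct rcu_lfq_node node;	(first member)
 *		struct rcu_head rcu_head;
 *		int data;
 *	};
 *
 *	static void myobj_free(struct rcu_head *head)
 *	{
 *		free((char *) head - offsetof(struct myobj, rcu_head));
 *	}
 *
 *	(called once the refcount drops to 0: neither the queue nor any
 *	caller still uses the node; defer the free by a grace period)
 *	static void myobj_release(struct urcu_ref *ref)
 *	{
 *		struct rcu_lfq_node *node = (struct rcu_lfq_node *)
 *			((char *) ref - offsetof(struct rcu_lfq_node, ref));
 *
 *		call_rcu(&((struct myobj *) node)->rcu_head, myobj_free);
 *	}
 *
 *	struct rcu_lfq_node *node = rcu_lfq_dequeue(&q, myobj_release);
 *
 *	if (node) {
 *		struct myobj *obj = (struct myobj *) node;
 *
 *		(use obj->data, then drop the caller's reference)
 *		urcu_ref_put(&node->ref, myobj_release);
 *	}
 */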

#ifdef __cplusplus
}
#endif

#endif /* _URCU_RCULFQUEUE_H */