urcu-ht.c (urcu.git): Document call_rcu() usage

#define _LGPL_SOURCE
#include <stdlib.h>
#include <urcu.h>
#include <arch.h>
#include <arch_atomic.h>
#include <assert.h>
#include <compiler.h>
#include <urcu-defer.h>
#include <errno.h>
#include <urcu-ht.h>

struct rcu_ht_node;

struct rcu_ht_node {
	struct rcu_ht_node *next;
	void *key;
	void *data;
};

struct rcu_ht {
	struct rcu_ht_node *tbl[HASH_SIZE];
	ht_hash_fct hash_fct;
	void (*free_fct)(void *data);	/* fct to free data */
};

struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->free_fct = free_fct;
	return ht;
}

/* delete all elements */
void ht_delete_all(struct rcu_ht *ht)
{
	unsigned long i;
	struct rcu_ht_node **prev, *node;

	for (i = 0; i < HASH_SIZE; i++) {
		rcu_read_lock();

		prev = &ht->tbl[i];
		node = rcu_dereference(*prev);
		/*
		 * Cut the head, therefore the whole bucket will be unused
		 * after a GP (except for concurrent additions).
		 */
		rcu_assign_pointer(*prev, NULL);
		for (;;) {
			if (likely(!node)) {
				break;
			}
			prev = &node->next;
			/*
			 * FIXME: calling call_rcu within a read-side C.S. is a
			 * bug, because it may call synchronize_rcu() if the
			 * callback queue is full.
			 */
			if (ht->free_fct && node->data)
				call_rcu(ht->free_fct, node->data);
			call_rcu(free, node);
			node = rcu_dereference(*prev);
		}
		rcu_read_unlock();
	}
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
void ht_destroy(struct rcu_ht *ht)
{
	ht_delete_all(ht);
	free(ht);
}

void *ht_lookup(struct rcu_ht *ht, void *key)
{
	unsigned long hash;
	struct rcu_ht_node *node;
	void *ret;

	hash = ht->hash_fct(key);

	rcu_read_lock();
	node = rcu_dereference(ht->tbl[hash]);
	for (;;) {
		if (likely(!node)) {
			ret = NULL;
			break;
		}
		if (node->key == key) {
			ret = node->data;
			break;
		}
		node = rcu_dereference(node->next);
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Will re-try until either:
 * - The key is already there (-EEXIST)
 * - We successfully add the key at the head of a table bucket.
 */
int ht_add(struct rcu_ht *ht, void *key, void *data)
{
	struct rcu_ht_node *node, *old_head, *new_head;
	unsigned long hash;
	int ret = 0;

	new_head = calloc(1, sizeof(struct rcu_ht_node));
	new_head->key = key;
	new_head->data = data;
	hash = ht->hash_fct(key);

	/*
	 * Here comes the fun and tricky part.
	 * Add at the beginning with a cmpxchg.
	 * Hold a read lock between the moment the first element is read
	 * and the nodes traversal (to find duplicates). This ensures that
	 * the head pointer has not been reclaimed when the cmpxchg is done.
	 * Always adding at the head ensures that we would have to re-try if
	 * a new item has been added concurrently, so we never add duplicates.
	 */
retry:
	rcu_read_lock();

	old_head = node = rcu_dereference(ht->tbl[hash]);
	for (;;) {
		if (likely(!node)) {
			break;
		}
		if (node->key == key) {
			ret = -EEXIST;
			goto end;
		}
		node = rcu_dereference(node->next);
	}
	if (rcu_cmpxchg_pointer(&ht->tbl[hash], old_head, new_head) != old_head)
		goto restart;
end:
	rcu_read_unlock();

	/* Free the unused node if the key was already present. */
	if (ret == -EEXIST)
		free(new_head);
	return ret;

	/* restart loop, release and re-take the read lock to be kind to GP */
restart:
	rcu_read_unlock();
	goto retry;
}
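
/*
 * Illustrative sketch, not part of the original file: one way a caller could
 * use the -EEXIST convention of ht_add() to implement "insert or get the
 * existing value". The helper name ht_add_or_lookup is hypothetical, and the
 * sketch ignores the small window where a concurrent ht_steal() could remove
 * the entry between ht_add() and ht_lookup().
 */
#if 0
static void *ht_add_or_lookup(struct rcu_ht *ht, void *key, void *data)
{
	int ret;

	ret = ht_add(ht, key, data);
	if (ret == -EEXIST) {
		/* Key already present: the caller still owns "data". */
		return ht_lookup(ht, key);
	}
	return data;
}
#endif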

/*
 * Restart until we successfully remove the entry, or no entry is left
 * ((void *)(unsigned long)-ENOENT).
 */
void *ht_steal(struct rcu_ht *ht, void *key)
{
	struct rcu_ht_node **prev, *node;
	unsigned long hash;
	void *data;

	hash = ht->hash_fct(key);

retry:
	rcu_read_lock();

	prev = &ht->tbl[hash];
	node = rcu_dereference(*prev);
	for (;;) {
		if (likely(!node)) {
			data = (void *)(unsigned long)-ENOENT;
			goto end;
		}
		if (node->key == key) {
			break;
		}
		prev = &node->next;
		node = rcu_dereference(*prev);
	}
	/* Found it! The pointer to the object is in "prev". */
	if (rcu_cmpxchg_pointer(prev, node, node->next) != node)
		goto restart;

	rcu_read_unlock();

	data = node->data;
	call_rcu(free, node);

	return data;

	/* Key not found: return the -ENOENT cookie without touching "node". */
end:
	rcu_read_unlock();
	return data;

	/* restart loop, release and re-take the read lock to be kind to GP */
restart:
	rcu_read_unlock();
	goto retry;
}
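
/*
 * Illustrative sketch, not part of the original file: ht_steal() returns the
 * stolen data pointer on success and the (void *)(unsigned long)-ENOENT
 * cookie when the key is absent, so callers must compare against that cookie
 * rather than against NULL. The function name example_steal is hypothetical.
 */
#if 0
static int example_steal(struct rcu_ht *ht, void *key)
{
	void *data;

	data = ht_steal(ht, key);
	if (data == (void *)(unsigned long)-ENOENT)
		return -ENOENT;	/* key was not in the table */
	/* The caller now owns "data" and is responsible for freeing it. */
	free(data);
	return 0;
}
#endif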

int ht_delete(struct rcu_ht *ht, void *key)
{
	void *data;

	data = ht_steal(ht, key);
	if (data == (void *)(unsigned long)-ENOENT)
		return -ENOENT;
	if (ht->free_fct && data)
		call_rcu(ht->free_fct, data);
	return 0;
}

unsigned long stupid_hash(void *key)
{
	return (unsigned long)key % HASH_SIZE;
}
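
/*
 * Illustrative sketch, not part of the original file: a minimal single-thread
 * use of the API above. It assumes the calling thread has been registered
 * with the RCU library (e.g. rcu_register_thread() in current liburcu), that
 * <string.h> is available for strdup(), and that values are heap-allocated so
 * free() is a suitable free_fct. ht_destroy() is only called once no other
 * thread can access the table, as its comment requires.
 */
#if 0
int example_usage(void)
{
	struct rcu_ht *ht;
	char *value;

	rcu_register_thread();

	ht = ht_new(stupid_hash, free);
	ht_add(ht, (void *)42UL, strdup("forty-two"));

	rcu_read_lock();
	value = ht_lookup(ht, (void *)42UL);
	/* "value" stays valid for the duration of this read-side C.S. */
	rcu_read_unlock();

	ht_delete(ht, (void *)42UL);	/* value freed later via call_rcu(free, ...) */
	ht_destroy(ht);

	rcu_unregister_thread();
	return 0;
}
#endif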