urcu-ht: works.
urcu-ht.c

/*
 * TODO: keys are currently assumed <= sizeof(void *). Key target never freed.
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <urcu.h>
#include <arch.h>
#include <arch_atomic.h>
#include <assert.h>
#include <compiler.h>
#include <urcu-defer.h>
#include <errno.h>
#include <urcu-ht.h>
#include <urcu/jhash.h>
#include <stdio.h>

struct rcu_ht_node;

struct rcu_ht_node {
	struct rcu_ht_node *next;
	void *key;
	void *data;
};

struct rcu_ht {
	struct rcu_ht_node **tbl;
	ht_hash_fct hash_fct;
	void (*free_fct)(void *data);	/* fct to free data */
	uint32_t keylen;
	uint32_t hashseed;
	struct ht_size {
		unsigned long add;
		unsigned long lookup;
	} size;
};

struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data),
		      unsigned long init_size, uint32_t keylen,
		      uint32_t hashseed)
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	if (!ht)
		return NULL;
	ht->hash_fct = hash_fct;
	ht->free_fct = free_fct;
	ht->size.add = init_size;
	ht->size.lookup = init_size;
	ht->keylen = keylen;
	ht->hashseed = hashseed;
	ht->tbl = calloc(init_size, sizeof(struct rcu_ht_node *));
	if (!ht->tbl) {
		free(ht);
		return NULL;
	}
	return ht;
}

void *ht_lookup(struct rcu_ht *ht, void *key)
{
	unsigned long hash;
	struct rcu_ht_node *node;
	void *ret;

	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size.lookup;

	rcu_read_lock();
	node = rcu_dereference(ht->tbl[hash]);
	for (;;) {
		if (likely(!node)) {
			ret = NULL;
			break;
		}
		if (node->key == key) {
			ret = node->data;
			break;
		}
		node = rcu_dereference(node->next);
	}
	rcu_read_unlock();

	return ret;
}

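/*
 * Reader-side usage sketch (illustration only; the helper name and key
 * value are hypothetical). Keys of up to pointer size are encoded in the
 * key pointer itself, per the TODO above; ht_lookup takes and releases
 * the RCU read lock internally, so the caller only needs to be a
 * registered RCU reader thread.
 */
static void *example_lookup(struct rcu_ht *ht)
{
	void *key = (void *)(unsigned long)42;	/* key encoded in pointer */

	return ht_lookup(ht, key);
}
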
/*
 * Will retry until either:
 * - The key is already there (-EEXIST)
 * - We successfully add the key at the head of a table bucket.
 */
int ht_add(struct rcu_ht *ht, void *key, void *data)
{
	struct rcu_ht_node *node, *old_head, *new_head;
	unsigned long hash;
	int ret = 0;

	new_head = calloc(1, sizeof(struct rcu_ht_node));
	if (!new_head)
		return -ENOMEM;
	new_head->key = key;
	new_head->data = data;
	/*
	 * Here comes the fun and tricky part.
	 * Add at the beginning with a cmpxchg.
	 * Hold a read lock between the moment the first element is read
	 * and the node traversal (to find duplicates). This ensures
	 * the head pointer has not been reclaimed when the cmpxchg is done.
	 * Always adding at the head ensures that we have to retry if a new
	 * item has been added concurrently. So we never add duplicates.
	 */
retry:
	rcu_read_lock();

	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size.add;

	old_head = node = rcu_dereference(ht->tbl[hash]);
	for (;;) {
		if (likely(!node)) {
			break;
		}
		if (node->key == key) {
			ret = -EEXIST;
			goto end;
		}
		node = rcu_dereference(node->next);
	}
	new_head->next = old_head;
	if (rcu_cmpxchg_pointer(&ht->tbl[hash], old_head, new_head) != old_head)
		goto restart;
end:
	rcu_read_unlock();
	if (ret == -EEXIST)
		free(new_head);	/* key already present: discard unused node */
	return ret;

	/* restart loop, release and re-take the read lock to be kind to GP */
restart:
	rcu_read_unlock();
	goto retry;
}

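/*
 * Writer-side usage sketch (illustration only; the helper name is
 * hypothetical). -EEXIST reports a duplicate key; since ht_add does not
 * take ownership of data in that case, the caller decides what to do
 * with it.
 */
static int example_add(struct rcu_ht *ht, unsigned long key, void *data)
{
	int ret;

	ret = ht_add(ht, (void *)key, data);
	if (ret == -EEXIST)
		return 0;	/* already present; caller keeps ownership of data */
	return ret;
}
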
/*
 * Will retry until either the entry is successfully removed, or no entry
 * matching the key is left, in which case (void *)(unsigned long)-ENOENT
 * is returned.
 */
void *ht_steal(struct rcu_ht *ht, void *key)
{
	struct rcu_ht_node **prev, *node;
	unsigned long hash;
	void *data;

retry:
	rcu_read_lock();

	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size.lookup;

	prev = &ht->tbl[hash];
	node = rcu_dereference(*prev);
	for (;;) {
		if (likely(!node)) {
			data = (void *)(unsigned long)-ENOENT;
			goto error;
		}
		if (node->key == key) {
			break;
		}
		prev = &node->next;
		node = rcu_dereference(*prev);
	}
	/* Found it! The pointer to the node is in *prev. */
	if (rcu_cmpxchg_pointer(prev, node, node->next) != node)
		goto restart;

	/* From that point, we own node. We can free it outside of the read lock. */
	rcu_read_unlock();

	data = node->data;
	call_rcu(free, node);
	return data;

error:
	rcu_read_unlock();
	return data;

	/* restart loop, release and re-take the read lock to be kind to GP */
restart:
	rcu_read_unlock();
	goto retry;
}

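/*
 * Usage sketch for ht_steal (illustration only; the helper name is
 * hypothetical): the -ENOENT marker must be decoded from the returned
 * pointer before the data can be used.
 */
static void *example_steal(struct rcu_ht *ht, unsigned long key)
{
	void *data;

	data = ht_steal(ht, (void *)key);
	if (data == (void *)(unsigned long)-ENOENT)
		return NULL;	/* no entry matched the key */
	return data;	/* the caller now owns data */
}
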
int ht_delete(struct rcu_ht *ht, void *key)
{
	void *data;

	data = ht_steal(ht, key);
	/* Note: NULL data stored under an existing key is indistinguishable
	 * from a missing key here, and is reported as -ENOENT. */
	if (data && data != (void *)(unsigned long)-ENOENT) {
		if (ht->free_fct)
			call_rcu(ht->free_fct, data);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Delete all old elements. Allow concurrent writer accesses. */
int ht_delete_all(struct rcu_ht *ht)
{
	unsigned long i;
	struct rcu_ht_node **prev, *node, *inext;
	int cnt = 0;

	for (i = 0; i < ht->size.lookup; i++) {
		rcu_read_lock();
		prev = &ht->tbl[i];
		/*
		 * Cut the head. After that, we own the first element.
		 */
		node = rcu_xchg_pointer(prev, NULL);
		if (!node) {
			rcu_read_unlock();
			continue;
		}
		/*
		 * We manage a list shared with concurrent writers and
		 * readers. Note that a concurrent add may or may not be
		 * deleted by us, depending on whether it arrives before or
		 * after the head is cut. "node" points to our first node.
		 * Remove the first elements iteratively.
		 */
		for (;;) {
			/* prev can never be NULL here: cut the rest of the
			 * list unconditionally. */
			prev = &node->next;
			inext = rcu_xchg_pointer(prev, NULL);
			/*
			 * "node" is the first element of the list we have
			 * cut. We therefore own it; no concurrent writer may
			 * delete it. There can only be concurrent lookups. A
			 * concurrent add can only be done on a bucket head,
			 * but we've cut it already. inext is also owned by
			 * us, because we have exchanged it for NULL. It will
			 * therefore be safe to use it after a G.P.
			 */
			rcu_read_unlock();
			if (node->data && ht->free_fct)
				call_rcu(ht->free_fct, node->data);
			call_rcu(free, node);
			cnt++;
			if (likely(!inext))
				break;
			rcu_read_lock();
			node = inext;
		}
	}
	return cnt;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	ret = ht_delete_all(ht);
	free(ht->tbl);
	free(ht);
	return ret;
}

/*
 * Expects keys of up to pointer size to be encoded in the pointer itself,
 * as stated in the TODO at the top of this file.
 */
uint32_t ht_jhash(void *key, uint32_t length, uint32_t initval)
{
	uint32_t ret;
	void *vkey;

	if (length <= sizeof(void *))
		vkey = &key;
	else
		vkey = key;
	ret = jhash(vkey, length, initval);
	return ret;
}
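
/*
 * End-to-end usage sketch (illustration only; the helper name and all
 * values are hypothetical). Assumes the caller has already registered
 * with the RCU library (e.g. rcu_register_thread() in liburcu) and that
 * the defer mechanism backing call_rcu() is set up for this thread.
 */
static int example_lifecycle(void)
{
	struct rcu_ht *ht;
	void *data;

	/* 64 buckets, 4-byte keys encoded in the key pointer, seed 0x42.
	 * free_fct is NULL: the table does not own the stored data. */
	ht = ht_new(ht_jhash, NULL, 64, sizeof(uint32_t), 0x42);
	if (!ht)
		return -1;

	ht_add(ht, (void *)7UL, "seven");
	data = ht_lookup(ht, (void *)7UL);	/* returns "seven" */
	ht_delete(ht, (void *)7UL);	/* node reclaimed after a grace period */

	ht_destroy(ht);	/* only once no readers/writers remain */
	return data ? 0 : 1;
}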