urcu-ht: add correct hash, implement delete_all
[urcu.git] / urcu-ht.c
#define _LGPL_SOURCE
#include <stdlib.h>
#include <urcu.h>
#include <arch.h>
#include <arch_atomic.h>
#include <assert.h>
#include <compiler.h>
#include <urcu-defer.h>
#include <errno.h>
#include <urcu-ht.h>
#include <urcu/jhash.h>

struct rcu_ht_node {
        struct rcu_ht_node *next;
        void *key;
        void *data;
};

struct rcu_ht {
        struct rcu_ht_node **tbl;
        ht_hash_fct hash_fct;
        void (*free_fct)(void *data);   /* function called to free data */
        struct ht_size {
                unsigned long add;
                unsigned long lookup;
        } size;
};

struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data),
                      unsigned long init_size)
{
        struct rcu_ht *ht;

        ht = calloc(1, sizeof(struct rcu_ht));
        if (!ht)
                return NULL;
        ht->hash_fct = hash_fct;
        ht->free_fct = free_fct;
        ht->size.add = init_size;
        ht->size.lookup = init_size;
        ht->tbl = calloc(init_size, sizeof(struct rcu_ht_node *));
        if (!ht->tbl) {
                free(ht);
                return NULL;
        }
        return ht;
}

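/*
 * Usage sketch (illustrative, not part of the original file): create a
 * table with 4096 buckets whose values are heap-allocated and released
 * with free(). ht_lookup() below compares keys by pointer value, so
 * hashing the pointer bits is sufficient. This assumes ht_jhash() is
 * declared in urcu-ht.h and that ht_hash_fct takes the key as its only
 * argument; the seed 0x9e370001 is an arbitrary example value.
 */
static unsigned long example_hash_pointer(void *key)
{
        /* Hash the bits of the pointer itself, not what it points to. */
        return ht_jhash(&key, sizeof(key), 0x9e370001);
}

static struct rcu_ht *example_new_table(void)
{
        return ht_new(example_hash_pointer, free, 4096);
}
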
void *ht_lookup(struct rcu_ht *ht, void *key)
{
        unsigned long hash;
        struct rcu_ht_node *node;
        void *ret;

        hash = ht->hash_fct(key) % ht->size.lookup;

        rcu_read_lock();
        node = rcu_dereference(ht->tbl[hash]);
        for (;;) {
                if (likely(!node)) {
                        ret = NULL;
                        break;
                }
                if (node->key == key) {
                        ret = node->data;
                        break;
                }
                node = rcu_dereference(node->next);
        }
        rcu_read_unlock();

        return ret;
}

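/*
 * Reader sketch (illustrative): urcu read-side critical sections nest,
 * so a caller can wrap ht_lookup() in its own read lock to keep the
 * returned object alive while using it. This only guards against
 * ht_delete(), which defers the free with call_rcu(); an ht_steal()
 * caller owns the data and may free it at any time.
 */
static void example_read(struct rcu_ht *ht, void *key)
{
        void *data;

        rcu_read_lock();
        data = ht_lookup(ht, key);
        if (data) {
                /* Use data here: it cannot be reclaimed by ht_delete()
                 * before rcu_read_unlock(). */
        }
        rcu_read_unlock();
}
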
/*
 * Will retry until either:
 * - the key is already present (-EEXIST), or
 * - we successfully add the new node at the head of its bucket.
 */
int ht_add(struct rcu_ht *ht, void *key, void *data)
{
        struct rcu_ht_node *node, *old_head, *new_head;
        unsigned long hash;
        int ret = 0;

        new_head = calloc(1, sizeof(struct rcu_ht_node));
        if (!new_head)
                return -ENOMEM;
        new_head->key = key;
        new_head->data = data;
        hash = ht->hash_fct(key) % ht->size.add;

        /*
         * Here comes the fun and tricky part: add the new node at the
         * head with a cmpxchg. Hold the read lock between the moment
         * the head pointer is read and the bucket traversal (which
         * looks for duplicates): this ensures the head has not been
         * reclaimed by the time the cmpxchg is done. Always adding at
         * the head also forces a retry whenever an item has been added
         * concurrently, so we never insert duplicates.
         */
retry:
        rcu_read_lock();

        old_head = node = rcu_dereference(ht->tbl[hash]);
        for (;;) {
                if (likely(!node))
                        break;
                if (node->key == key) {
                        /* Not yet published: the new node is ours to free. */
                        free(new_head);
                        ret = -EEXIST;
                        goto end;
                }
                node = rcu_dereference(node->next);
        }
        new_head->next = old_head;
        if (rcu_cmpxchg_pointer(&ht->tbl[hash], old_head, new_head) != old_head)
                goto restart;
end:
        rcu_read_unlock();

        return ret;

        /* Restart the loop, releasing and re-taking the read lock to be kind to the GP. */
restart:
        rcu_read_unlock();
        goto retry;
}

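/*
 * Writer sketch (illustrative): insert heap-allocated data, keeping
 * ownership of it when the key turns out to be already present.
 */
static int example_add(struct rcu_ht *ht, void *key, void *data)
{
        int ret;

        ret = ht_add(ht, key, data);
        if (ret == -EEXIST)
                free(data);     /* not inserted: we still own data */
        return ret;
}
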
/*
 * Restart until we either successfully remove the entry or find that
 * no entry with this key is left ((void *)(unsigned long)-ENOENT).
 */
void *ht_steal(struct rcu_ht *ht, void *key)
{
        struct rcu_ht_node **prev, *node;
        unsigned long hash;
        void *data;

        hash = ht->hash_fct(key) % ht->size.lookup;

retry:
        rcu_read_lock();

        prev = &ht->tbl[hash];
        node = rcu_dereference(*prev);
        for (;;) {
                if (likely(!node)) {
                        rcu_read_unlock();
                        return (void *)(unsigned long)-ENOENT;
                }
                if (node->key == key)
                        break;
                prev = &node->next;
                node = rcu_dereference(*prev);
        }
        /* Found it! The pointer to unlink is at *prev. */
        if (rcu_cmpxchg_pointer(prev, node, node->next) != node)
                goto restart;
        rcu_read_unlock();

        /* The node is unlinked: its data is ours, the node itself is
         * freed after a grace period. */
        data = node->data;
        call_rcu(free, node);

        return data;

        /* Restart the loop, releasing and re-taking the read lock to be kind to the GP. */
restart:
        rcu_read_unlock();
        goto retry;
}

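/*
 * Steal sketch (illustrative): remove the entry for "key" and take
 * ownership of its data. The node itself is only reclaimed after a
 * grace period, but the data is ours immediately.
 */
static void *example_steal(struct rcu_ht *ht, void *key)
{
        void *data;

        data = ht_steal(ht, key);
        if (data == (void *)(unsigned long)-ENOENT)
                return NULL;    /* no such key */
        return data;            /* caller now owns data */
}
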
int ht_delete(struct rcu_ht *ht, void *key)
{
        void *data;

        data = ht_steal(ht, key);
        if (data == (void *)(unsigned long)-ENOENT)
                return -ENOENT;
        if (ht->free_fct && data)
                call_rcu(ht->free_fct, data);
        return 0;
}

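/*
 * Replace sketch (illustrative): a single updater can replace the
 * value stored under "key" by stealing the old entry and re-adding.
 * This is not atomic: a concurrent reader may briefly see the key as
 * absent between the two steps.
 */
static void *example_replace(struct rcu_ht *ht, void *key, void *new_data)
{
        void *old;

        old = ht_steal(ht, key);
        if (old == (void *)(unsigned long)-ENOENT)
                old = NULL;
        /* With a single updater per key, the key is now absent, so
         * ht_add() can only fail on allocation (-ENOMEM). */
        (void) ht_add(ht, key, new_data);
        return old;     /* caller owns the previous data, if any */
}
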
/* Delete all old elements. Allow concurrent writer accesses. */
void ht_delete_all(struct rcu_ht *ht)
{
        unsigned long i;
        struct rcu_ht_node **prev, *node, *inext;

        for (i = 0; i < ht->size.lookup; i++) {
                rcu_read_lock();
                /*
                 * Cut the bucket head. After that, we own the first
                 * element of the list.
                 */
                prev = &ht->tbl[i];
                node = rcu_xchg_pointer(prev, NULL);
                if (!node) {
                        rcu_read_unlock();
                        continue;
                }
                /*
                 * We now manage a list shared with concurrent writers
                 * and readers. Note that a concurrent add may or may
                 * not be deleted by us, depending on whether it
                 * arrives before or after the head is cut. "node"
                 * points to our first node. Remove the first elements
                 * iteratively.
                 */
                for (;;) {
                        prev = &node->next;
                        inext = rcu_xchg_pointer(prev, NULL);
                        /*
                         * "node" is the first element of the list we
                         * have cut, so we own it: no concurrent writer
                         * may delete it, and concurrent adds can only
                         * target a bucket head, which has already been
                         * cut. Only concurrent lookups may still see
                         * it. "inext" is owned by us as well, because
                         * we exchanged it for NULL; it is therefore
                         * safe to use after a grace period.
                         */
                        rcu_read_unlock();
                        if (ht->free_fct && node->data)
                                call_rcu(ht->free_fct, node->data);
                        call_rcu(free, node);
                        if (likely(!inext))
                                break;
                        rcu_read_lock();
                        node = inext;
                }
        }
}

/*
 * Should only be called when no more concurrent readers nor writers
 * can possibly access the table.
 */
void ht_destroy(struct rcu_ht *ht)
{
        ht_delete_all(ht);
        free(ht->tbl);
        free(ht);
}

uint32_t ht_jhash(void *key, uint32_t length, uint32_t initval)
{
        return jhash(key, length, initval);
}