rculfhash: implement real hash function
/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <pthread.h>

#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif

#define BUCKET_SIZE_RESIZE_THRESHOLD	4

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	struct rcu_head head;
	struct rcu_ht_node *tbl[0];
};

struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	void (*ht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally in the public domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n)	(n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n)	R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n)	R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

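/*
 * The byte-wise table lookup above is combined below to reverse full
 * 32-bit and 64-bit words; e.g. bit_reverse_u32(0x00000001) returns
 * 0x80000000 (bit 0 maps to bit 31).
 */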
static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}

/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * Source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally in the public domain.
 */
static const char LogTable256[256] =
{
#define LT(n)	n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};

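/*
 * Integer log2, by table lookup on successive bytes; e.g. log2_u32(1) == 0,
 * log2_u32(16) == 4 and log2_u32(17) == 4. For v == 0 the table's -1
 * sentinel is returned, so callers must pass a non-zero value.
 */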
uint32_t log2_u32(uint32_t v)
{
	uint32_t t, tt;

	if ((tt = (v >> 16)))
		return (t = (tt >> 8))
				? 24 + LogTable256[t]
				: 16 + LogTable256[tt];
	else
		return (t = (v >> 8))
				? 8 + LogTable256[t]
				: LogTable256[v];
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);

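/*
 * Called during chain traversal: when a bucket chain grows past the
 * threshold, request a lazy resize by log2(chain length) growth orders,
 * i.e. a target table size of roughly size * chain_len.
 */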
static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len >= BUCKET_SIZE_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t, log2_u32(chain_len));
}

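/*
 * The least-significant bit of a next pointer is used as the logical
 * "removed" flag. This is safe because struct rcu_ht_node is allocated
 * with at least word alignment, so bit 0 of a node address is otherwise
 * always zero.
 */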
static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~0x1);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & 0x1;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | 0x1);
}

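/*
 * Atomically update *ptr to the maximum of its current value and v,
 * using a compare-and-swap retry loop. Returns the resulting maximum.
 */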
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}

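/*
 * All nodes, including the bucket heads, live on a single linked list
 * ordered by the bit-reversed value of their hash. With this ordering,
 * the nodes of a bucket in a table of size 2^n stay contiguous when the
 * table doubles to 2^(n+1): growing only links in new bucket heads and
 * never moves existing nodes. This is in the spirit of Shalev and
 * Shavit's "Split-Ordered Lists: Lock-Free Extensible Hash Tables".
 *
 * _ht_add walks the bucket's chain to find the insert position and
 * publishes the node with a cmpxchg on the predecessor's next pointer,
 * retrying from the bucket head if a concurrent update wins the race.
 */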
static
void _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev = NULL, *iter = NULL;

	if (!t->size)
		return;
	for (;;) {
		uint32_t chain_len = 0;

		iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
		assert(iter_prev);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			iter = clear_flag(rcu_dereference(iter_prev->next));
			if (unlikely(!iter))
				break;
			if (iter->reverse_hash < node->reverse_hash)
				break;
			iter_prev = iter;
			check_resize(ht, t, ++chain_len);
		}
		/* add in iter_prev->next */
		if (is_removed(iter))
			continue;
		assert(node != iter);
		node->next = iter;
		assert(iter_prev != node);
		if (uatomic_cmpxchg(&iter_prev->next, iter, node) != iter)
			continue;
		break;
	}
}

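/*
 * Two-phase removal: first mark the node as logically removed by
 * setting the low bit of its next pointer with a cmpxchg (only one
 * remover can win this race), then physically unlink it from its
 * predecessor. A concurrent insert makes the unlink cmpxchg fail, in
 * which case the operation is retried from the bucket head.
 */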
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *old;
	int found, ret = 0;
	int flagged = 0;

retry:
	found = 0;
	iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
	assert(iter_prev);
	assert(iter_prev->reverse_hash <= node->reverse_hash);
	for (;;) {
		iter = clear_flag(rcu_dereference(iter_prev->next));
		if (unlikely(!iter))
			break;
		if (iter->reverse_hash < node->reverse_hash)
			break;
		if (iter == node) {
			found = 1;
			break;
		}
		iter_prev = iter;
	}
	if (!found) {
		ret = -ENOENT;
		goto end;
	}
	next = rcu_dereference(iter->next);
	if (!flagged) {
		if (is_removed(next)) {
			ret = -ENOENT;
			goto end;
		}
		/* set deletion flag */
		if ((old = uatomic_cmpxchg(&iter->next, next,
					   flag_removed(next))) != next) {
			if (old == flag_removed(next)) {
				ret = -ENOENT;
				goto end;
			} else {
				goto retry;
			}
		}
		flagged = 1;
	}
	/*
	 * Remove the element from the list. Retry if there has been a
	 * concurrent add (there cannot be a concurrent delete, because
	 * we won the deletion flag cmpxchg).
	 */
	if (uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next)) != iter)
		goto retry;
end:
	return ret;
}

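/*
 * Populate bucket head (dummy) nodes for indexes [first, first + len).
 * t->size is bumped at each power of two so that _ht_add hashes each
 * new dummy node into the part of the table already initialized.
 */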
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
		t->tbl[i]->dummy = 1;
		t->tbl[i]->hash = i;
		t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
		_ht_add(ht, t, t->tbl[i]);
	}
	t->resize_target = t->size = end;
}

struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
	ht->t->size = 0;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, max(init_size, 1));
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}

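/*
 * Example usage sketch (illustrative only; the helper names below are
 * hypothetical, and signatures are abridged to the ht_hash_fct and
 * ht_compare_fct typedefs from urcu/rculfhash.h). A caller supplies a
 * hash function such as the Jenkins hash from urcu/jhash.h, a
 * memcmp-style compare function returning 0 on match, and call_rcu:
 *
 *	unsigned long hash_key(void *key, size_t length, unsigned long seed)
 *	{
 *		return jhash(key, length, seed);
 *	}
 *
 *	unsigned long compare_key(void *key1, size_t key1_len,
 *				  void *key2, size_t key2_len)
 *	{
 *		if (key1_len != key2_len)
 *			return -1;
 *		return memcmp(key1, key2, key1_len);
 *	}
 *
 *	struct rcu_ht *ht = ht_new(hash_key, compare_key, 0x42UL,
 *				   2048, call_rcu);
 *
 * Nodes are caller-allocated; key and key_len must be set before
 * ht_add(), and add/remove/lookup run from within RCU read-side
 * critical sections:
 *
 *	rcu_read_lock();
 *	node->key = key;
 *	node->key_len = key_len;
 *	ht_add(ht, node);
 *	node = ht_lookup(ht, key, key_len);
 *	rcu_read_unlock();
 */

/*
 * ht_lookup walks the chain from the hashed bucket head, stopping once
 * the reverse hash passes the key's, and reports a match only if the
 * matching node is not marked as logically removed.
 */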
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;
		if (node->reverse_hash > reverse_hash) {
			node = NULL;
			break;
		}
		if (!ht->compare_fct(node->key, node->key_len, key, key_len)) {
			if (is_removed(rcu_dereference(node->next)))
				node = NULL;
			break;
		}
		node = clear_flag(rcu_dereference(node->next));
	}
	return node;
}

void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);

	t = rcu_dereference(ht->t);
	_ht_add(ht, t, node);
}

int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}

static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		if (!node->dummy)
			return -EPERM;
		node = node->next;
	} while (node);
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(t->tbl[i]->dummy);
		free(t->tbl[i]);
	}
	return 0;
}

/*
 * Should only be called when no concurrent readers nor writers can
 * possibly access the table anymore.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

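/*
 * Resizing only ever grows the table (resize_target is raised
 * monotonically), so the old bucket-head pointers can simply be
 * memcpy'd into the new table: nodes never move on the split-ordered
 * list, only new bucket heads are linked in. The old table is freed
 * through call_rcu once readers can no longer hold a reference to it.
 */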
/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
		   old_size, new_size);
	if (old_size == new_size)
		return;
	new_t = malloc(sizeof(struct rcu_table)
		       + (new_size * sizeof(struct rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_size * sizeof(struct rcu_ht_node *));
	init_table(ht, new_t, old_size, new_size - old_size);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}

static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
}

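/*
 * Lazy resize: raise the resize target, then hand the actual work to
 * the call_rcu worker thread via ht_call_rcu. This lets chain-length
 * checks performed inside read-side critical sections trigger a grow
 * without taking the resize mutex themselves.
 */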
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
	}
}