rculfhash: update add_unique api
[urcu.git] / rculfhash.c
/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <pthread.h>

#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif

#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	2

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct rcu_ht_node *tbl[0];
};

struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	void (*ht_call_rcu)(struct rcu_head *head,
			void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
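
/*
 * Nodes are kept in a single linked list ordered by the bit-reversed
 * value of their hash (split-ordered list style): all keys that map to
 * the same bucket (same low-order hash bits) form a contiguous run in
 * reverse-hash order, so growing the table only adds new dummy bucket
 * nodes and never moves already-inserted nodes. As a small worked
 * example, bit_reverse_u8(0x01) is 0x80 and bit_reverse_u8(0x03) is
 * 0xc0.
 */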

/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from Public Domain.
 */
static const char LogTable256[256] =
{
#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};

uint32_t log2_u32(uint32_t v)
{
	uint32_t t, tt;

	if ((tt = (v >> 16)))
		return (t = (tt >> 8))
				? 24 + LogTable256[t]
				: 16 + LogTable256[tt];
	else
		return (t = (v >> 8))
				? 8 + LogTable256[t]
				: LogTable256[v];
}
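
/*
 * log2_u32() returns the integer part of log2(v), e.g. log2_u32(8) == 3
 * and log2_u32(9) == 3. For v == 0 the table yields its -1 sentinel, so
 * the result is only meaningful for non-zero input.
 */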

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);

static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t,
			log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
}
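
/*
 * Worked example: with CHAIN_LEN_TARGET == 1 and
 * CHAIN_LEN_RESIZE_THRESHOLD == 2, a chain length of 4 requests a
 * growth order of log2_u32(4 - 1 - 1) == 1, i.e. a doubling of the
 * table size (see resize_target_update() below).
 */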

static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~0x1);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & 0x1;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | 0x1);
}
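
/*
 * The lowest bit of the next pointer is used as the "logically removed"
 * flag: nodes contain pointer-sized fields and are therefore aligned,
 * so bit 0 is never part of a valid address. clear_flag() strips the
 * flag before dereferencing, is_removed() tests it, and flag_removed()
 * sets it.
 */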

static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}
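
/*
 * _uatomic_max() atomically raises *ptr to v if v is larger, and
 * returns the resulting maximum. Starting from a smaller stored value,
 * two threads racing with v == 8 and v == 16 always leave *ptr at 16,
 * whichever order their cmpxchg operations land in.
 */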

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			if (unlikely(!iter))
				return;
			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
				return;
			next = rcu_dereference(clear_flag(iter)->next);
			if (is_removed(next))
				break;
			iter_prev = iter;
			iter = next;
		}
		assert(!is_removed(iter));
		(void) uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next));
	}
}
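
/*
 * _ht_gc_bucket() walks the bucket starting at its dummy node and, for
 * every logically removed node sorting at or before "node", uses
 * cmpxchg to swing the predecessor's next pointer past it, unlinking it
 * physically. The outer loop restarts from the dummy node after each
 * unlink attempt, so concurrent insertions and removals are tolerated.
 */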

static
struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
			    struct rcu_ht_node *node, int unique)
{
	struct rcu_ht_node *iter_prev, *dummy, *iter, *next;
	unsigned long hash;

	if (!t->size) {
		assert(node->dummy);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			if (unlikely(!iter))
				goto insert;
			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
				goto insert;
			next = rcu_dereference(clear_flag(iter)->next);
			if (is_removed(next))
				goto gc_node;
			if (unique
			    && !clear_flag(iter)->dummy
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		node->next = iter;
		if (uatomic_cmpxchg(&iter_prev->next, iter,
				    node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		(void) uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next));
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
	return node;
}
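
/*
 * _ht_add() inserts "node" at its reverse-hash-ordered position. When
 * "unique" is non-zero and a non-dummy node with an equal key is found,
 * that existing node is returned instead of inserting; otherwise the
 * newly inserted "node" itself is returned. The final gc_end pass
 * cleans up any logically removed nodes encountered in the bucket.
 */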

static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *dummy, *next, *old;
	int flagged = 0;
	unsigned long hash;

	/* logically delete the node */
	old = rcu_dereference(node->next);
	do {
		next = old;
		if (is_removed(next))
			goto end;
		assert(!node->dummy);
		old = uatomic_cmpxchg(&node->next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: look the
	 * node up in its bucket and remove it (along with any other logically
	 * removed node) if found.
	 */
	hash = bit_reverse_ulong(node->reverse_hash);
	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicates that we (and no other thread)
	 * removed the node from the hash table.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->next)));
		return 0;
	} else
		return -ENOENT;
}
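
/*
 * Removal is done in two steps: the node is first flagged as logically
 * removed by setting the low bit of its next pointer, then a GC pass on
 * its bucket unlinks it physically. Only the thread whose cmpxchg set
 * the flag reports success (0); a node already flagged by another
 * thread yields -ENOENT.
 */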

static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
		t->tbl[i]->dummy = 1;
		t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
		(void) _ht_add(ht, t, t->tbl[i], 0);
	}
	t->resize_target = t->size = end;
	t->resize_initiated = 0;
}
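
/*
 * Each bucket head is a pre-allocated "dummy" node whose reverse_hash
 * is the bit-reversed bucket index. Dummy nodes are never returned by
 * ht_lookup() and are not meant to be removed through ht_remove() (note
 * the assertions); they only serve as stable list entry points for
 * their bucket.
 */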

struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
	ht->t->size = 0;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, max(init_size, 1));
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}
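
/*
 * Minimal usage sketch. my_hash and my_compare stand for
 * caller-supplied functions matching ht_hash_fct and ht_compare_fct as
 * they are invoked in this file; they are not part of this library, and
 * the 0x42UL seed and initial size of 1 are arbitrary. The add is shown
 * inside an RCU read-side critical section, since the update path uses
 * rcu_dereference(); "node" is a caller-initialized rcu_ht_node with
 * its key and key_len set.
 *
 *	struct rcu_ht *ht;
 *
 *	ht = ht_new(my_hash, my_compare, 0x42UL, 1, call_rcu);
 *	rcu_read_lock();
 *	ht_add(ht, node);
 *	rcu_read_unlock();
 */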

struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		if (likely(!is_removed(rcu_dereference(node->next)))
		    && !node->dummy
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(rcu_dereference(node->next));
	}
	assert(!node || !node->dummy);
	return node;
}
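
/*
 * Lookup usage sketch: the caller is expected to hold the RCU read lock
 * so that the returned pointer stays valid while it is used, e.g.:
 *
 *	rcu_read_lock();
 *	node = ht_lookup(ht, key, key_len);
 *	if (node)
 *		(use node while still in the read-side critical section)
 *	rcu_read_unlock();
 */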

void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _ht_add(ht, t, node, 0);
}

struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	return _ht_add(ht, t, node, 1);
}
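
/*
 * ht_add_unique() returns the node that ends up in the table: the node
 * passed in when insertion succeeded, or the already-present node with
 * an equal key when it did not. Callers can thus detect duplicates by
 * comparing the return value with the argument ("node" below is a
 * caller-initialized rcu_ht_node with key and key_len set):
 *
 *	ret = ht_add_unique(ht, node);
 *	if (ret != node)
 *		(an equal key was already present, "node" was not added)
 */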

int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}

static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		if (!node->dummy)
			return -EPERM;
		node = node->next;
		assert(!is_removed(node));
	} while (node);
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(t->tbl[i]->dummy);
		free(t->tbl[i]);
	}
	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

void ht_count_nodes(struct rcu_ht *ht,
		    unsigned long *count,
		    unsigned long *removed)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Count nodes and logically removed nodes in the whole table */
	node = rcu_dereference(t->tbl[0]);
	do {
		next = rcu_dereference(node->next);
		if (is_removed(next)) {
			assert(!node->dummy);
			(*removed)++;
		} else if (!node->dummy)
			(*count)++;
		node = clear_flag(next);
	} while (node);
}

static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
		   old_size, new_size);
	if (old_size == new_size)
		return;
	new_t = malloc(sizeof(struct rcu_table)
			+ (new_size * sizeof(struct rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_size * sizeof(struct rcu_ht_node *));
	init_table(ht, new_t, old_size, new_size - old_size);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}
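
/*
 * The resize path never moves existing nodes: it allocates a larger
 * table, copies the old bucket (dummy node) pointers, initializes the
 * additional buckets with init_table(), then publishes the new struct
 * rcu_table with rcu_assign_pointer(). The old table structure is
 * reclaimed through ht_call_rcu once no reader can still hold a
 * reference to it.
 */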

static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}