rculfhash: merge dummy flag into next pointer
[urcu.git] / rculfhash.c
1 /*
2 * rculfhash.c
3 *
4 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
5 *
6 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _LGPL_SOURCE
24 #include <stdlib.h>
25 #include <errno.h>
26 #include <assert.h>
27 #include <stdio.h>
28 #include <stdint.h>
29 #include <string.h>
30
31 #include <urcu.h>
32 #include <urcu-call-rcu.h>
33 #include <urcu/arch.h>
34 #include <urcu/uatomic.h>
35 #include <urcu/jhash.h>
36 #include <urcu/compiler.h>
37 #include <urcu/rculfhash.h>
38 #include <stdio.h>
39 #include <pthread.h>
40
41 #define DEBUG /* Test */
42
43 #ifdef DEBUG
44 #define dbg_printf(args...) printf(args)
45 #else
46 #define dbg_printf(args...)
47 #endif
48
49 #define CHAIN_LEN_TARGET 1
50 #define CHAIN_LEN_RESIZE_THRESHOLD 2
51
52 #ifndef max
53 #define max(a, b) ((a) > (b) ? (a) : (b))
54 #endif
55
56 #define REMOVED_FLAG (1UL << 0)
57 #define DUMMY_FLAG (1UL << 1)
58 #define FLAGS_MASK ((1UL << 2) - 1)
59
60 struct rcu_table {
61 unsigned long size; /* always a power of 2 */
62 unsigned long resize_target;
63 int resize_initiated;
64 struct rcu_head head;
65 struct rcu_ht_node *tbl[0];
66 };
67
68 struct rcu_ht {
69 struct rcu_table *t; /* shared */
70 ht_hash_fct hash_fct;
71 ht_compare_fct compare_fct;
72 unsigned long hash_seed;
73 pthread_mutex_t resize_mutex; /* resize mutex: serializes resize operations */
74 unsigned int in_progress_resize;
75 void (*ht_call_rcu)(struct rcu_head *head,
76 void (*func)(struct rcu_head *head));
77 };
78
79 struct rcu_resize_work {
80 struct rcu_head head;
81 struct rcu_ht *ht;
82 };
83
84 /*
85 * Algorithm to reverse bits in a word by lookup table, extended to
86 * 64-bit words.
87 * Source:
88 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
89 * Originally from Public Domain.
90 */
91
92 static const uint8_t BitReverseTable256[256] =
93 {
94 #define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
95 #define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
96 #define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
97 R6(0), R6(2), R6(1), R6(3)
98 };
99 #undef R2
100 #undef R4
101 #undef R6
102
103 static
104 uint8_t bit_reverse_u8(uint8_t v)
105 {
106 return BitReverseTable256[v];
107 }
108
109 static __attribute__((unused))
110 uint32_t bit_reverse_u32(uint32_t v)
111 {
112 return ((uint32_t) bit_reverse_u8(v) << 24) |
113 ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
114 ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
115 ((uint32_t) bit_reverse_u8(v >> 24));
116 }
117
118 static __attribute__((unused))
119 uint64_t bit_reverse_u64(uint64_t v)
120 {
121 return ((uint64_t) bit_reverse_u8(v) << 56) |
122 ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
123 ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
124 ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
125 ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
126 ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
127 ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
128 ((uint64_t) bit_reverse_u8(v >> 56));
129 }
130
131 static
132 unsigned long bit_reverse_ulong(unsigned long v)
133 {
134 #if (CAA_BITS_PER_LONG == 32)
135 return bit_reverse_u32(v);
136 #else
137 return bit_reverse_u64(v);
138 #endif
139 }
140
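/*
 * Why reverse the bits at all: ordering nodes by bit-reversed hash keeps the
 * single linked list sorted across table growth, because bucket indices
 * listed in reversed-bit order interleave when the table doubles.  The helper
 * below is only an illustrative sketch (its name is not part of the API); it
 * prints the split order of the first 8 buckets, which comes out as
 * 0, 4, 2, 6, 1, 5, 3, 7, so growing from 4 to 8 buckets merely inserts the
 * new dummy nodes between the existing ones.
 */
static __attribute__((unused))
void example_print_split_order(void)
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		dbg_printf("bucket %u -> reversed 0x%08x\n",
			i, (unsigned int) bit_reverse_u32(i));
}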
141 /*
142 * Algorithm to find the log2 of a 32-bit unsigned integer.
143 * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
144 * Originally from Public Domain.
145 */
146 static const char LogTable256[256] =
147 {
148 #define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
149 -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
150 LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
151 LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
152 };
153
154 uint32_t log2_u32(uint32_t v)
155 {
156 uint32_t t, tt;
157
158 if ((tt = (v >> 16)))
159 return (t = (tt >> 8))
160 ? 24 + LogTable256[t]
161 : 16 + LogTable256[tt];
162 else
163 return (t = (v >> 8))
164 ? 8 + LogTable256[t]
165 : LogTable256[v];
166 }
167
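/*
 * log2_u32() returns the floor of the base-2 logarithm, e.g.
 * log2_u32(1) == 0, log2_u32(3) == 1, log2_u32(8) == 3 and
 * log2_u32(1000) == 9 (2^9 = 512 <= 1000 < 1024).  check_resize() below
 * feeds it a chain length to turn an overlong bucket chain into a number of
 * table doublings to request.
 */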
168 static
169 void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
170
171 static
172 void check_resize(struct rcu_ht *ht, struct rcu_table *t,
173 uint32_t chain_len)
174 {
175 if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
176 ht_resize_lazy(ht, t,
177 log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
178 }
179
180 static
181 struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
182 {
183 return (struct rcu_ht_node *) (((unsigned long) node) & ~FLAGS_MASK);
184 }
185
186 static
187 int is_removed(struct rcu_ht_node *node)
188 {
189 return ((unsigned long) node) & REMOVED_FLAG;
190 }
191
192 static
193 struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
194 {
195 return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
196 }
197
198 static
199 int is_dummy(struct rcu_ht_node *node)
200 {
201 return ((unsigned long) node) & DUMMY_FLAG;
202 }
203
204 static
205 struct rcu_ht_node *flag_dummy(struct rcu_ht_node *node)
206 {
207 return (struct rcu_ht_node *) (((unsigned long) node) | DUMMY_FLAG);
208 }
209
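/*
 * REMOVED_FLAG and DUMMY_FLAG live in the two low-order bits of the next
 * pointer, which are zero for any properly aligned node, so a flag and the
 * pointer it annotates are always published in a single atomic update.
 * A small illustrative sketch of how the helpers above compose (the function
 * name is arbitrary and it assumes "node" is a real, aligned pointer):
 */
static __attribute__((unused))
void example_flag_roundtrip(struct rcu_ht_node *node)
{
	struct rcu_ht_node *tagged;

	tagged = flag_dummy(flag_removed(node));
	assert(is_removed(tagged));
	assert(is_dummy(tagged));
	assert(clear_flag(tagged) == node);	/* strips both flags */
}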
210 static
211 unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
212 {
213 unsigned long old1, old2;
214
215 old1 = uatomic_read(ptr);
216 do {
217 old2 = old1;
218 if (old2 >= v)
219 return old2;
220 } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
221 return v;
222 }
223
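/*
 * _uatomic_max() is a lock-free "store if larger" primitive: the value at
 * *ptr only ever grows, and each caller either installs its own proposal or
 * returns an already-larger value it observed.  resize_target_update() below
 * relies on this so that concurrent resize requests can never shrink the
 * resize target.
 */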
224 /*
225 * Remove all logically deleted nodes from a bucket up to a certain node key.
226 */
227 static
228 void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
229 {
230 struct rcu_ht_node *iter_prev, *iter, *next, *new_next;
231
232 for (;;) {
233 iter_prev = dummy;
234 /* We can always skip the dummy node initially */
235 iter = rcu_dereference(iter_prev->p.next);
236 assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
237 for (;;) {
238 if (unlikely(!clear_flag(iter)))
239 return;
240 if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
241 return;
242 next = rcu_dereference(clear_flag(iter)->p.next);
243 if (is_removed(next))
244 break;
245 iter_prev = iter;
246 iter = next;
247 }
248 assert(!is_removed(iter));
249 if (is_dummy(iter))
250 new_next = flag_dummy(clear_flag(next));
251 else
252 new_next = clear_flag(next);
253 (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
254 }
255 }
256
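/*
 * Removal is two-phase: _ht_remove() below first *logically* deletes a node
 * by setting REMOVED_FLAG in the node's own next pointer, and the garbage
 * collection above then physically unlinks it by making its predecessor skip
 * over it:
 *
 *   before:  prev -> victim (next flagged REMOVED) -> succ
 *   after:   prev -----------------------------------> succ
 *
 * The result of the cmpxchg is deliberately ignored: the outer loop always
 * rescans from the dummy head of the bucket and only returns once no
 * logically removed node remains before the requested key.
 */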
257 static
258 struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
259 struct rcu_ht_node *node, int unique, int dummy)
260 {
261 struct rcu_ht_node *iter_prev, *iter, *next, *new_node, *new_next,
262 *dummy_node;
263 unsigned long hash;
264
265 if (!t->size) {
266 assert(node->p.dummy);
267 assert(dummy);
268 node->p.next = flag_dummy(NULL);
269 return node; /* Initial first add (head) */
270 }
271 hash = bit_reverse_ulong(node->p.reverse_hash);
272 for (;;) {
273 uint32_t chain_len = 0;
274
275 /*
276 * iter_prev points to the non-removed node prior to the
277 * insert location.
278 */
279 iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
280 /* We can always skip the dummy node initially */
281 iter = rcu_dereference(iter_prev->p.next);
282 assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
283 for (;;) {
284 if (unlikely(!clear_flag(iter)))
285 goto insert;
286 if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
287 goto insert;
288 next = rcu_dereference(clear_flag(iter)->p.next);
289 if (is_removed(next))
290 goto gc_node;
291 if (unique
292 && !clear_flag(iter)->p.dummy
293 && !ht->compare_fct(node->key, node->key_len,
294 clear_flag(iter)->key,
295 clear_flag(iter)->key_len))
296 return clear_flag(iter);
297 /* Only account for identical reverse hash once */
298 if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash)
299 check_resize(ht, t, ++chain_len);
300 iter_prev = clear_flag(iter);
301 iter = next;
302 }
303 insert:
304 assert(node != clear_flag(iter));
305 assert(!is_removed(iter_prev));
306 assert(iter_prev != node);
307 if (!dummy)
308 node->p.next = iter;
309 else
310 node->p.next = flag_dummy(iter);
311 if (is_dummy(iter))
312 new_node = flag_dummy(node);
313 else
314 new_node = node;
315 if (uatomic_cmpxchg(&iter_prev->p.next, iter,
316 new_node) != iter)
317 continue; /* retry */
318 else
319 goto gc_end;
320 gc_node:
321 assert(!is_removed(iter));
322 if (is_dummy(iter))
323 new_next = flag_dummy(clear_flag(next));
324 else
325 new_next = clear_flag(next);
326 (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
327 /* retry */
328 }
329 gc_end:
330 /* Garbage collect logically removed nodes in the bucket */
331 dummy_node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
332 _ht_gc_bucket(dummy_node, node);
333 return node;
334 }
335
336 static
337 int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
338 {
339 struct rcu_ht_node *dummy, *next, *old;
340 int flagged = 0;
341 unsigned long hash;
342
343 /* logically delete the node */
344 old = rcu_dereference(node->p.next);
345 do {
346 next = old;
347 if (is_removed(next))
348 goto end;
349 assert(!node->p.dummy);
350 old = uatomic_cmpxchg(&node->p.next, next,
351 flag_removed(next));
352 } while (old != next);
353
354 /* We performed the (logical) deletion. */
355 flagged = 1;
356
357 /*
358 * Ensure that the node is no longer visible to readers: look up the
359 * node and unlink it (along with any other logically removed nodes)
360 * if found.
361 */
362 hash = bit_reverse_ulong(node->p.reverse_hash);
363 dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
364 _ht_gc_bucket(dummy, node);
365 end:
366 /*
367 * Only the flagging action indicated that we (and no other)
368 * removed the node from the hash.
369 */
370 if (flagged) {
371 assert(is_removed(rcu_dereference(node->p.next)));
372 return 0;
373 } else
374 return -ENOENT;
375 }
376
377 static
378 void init_table(struct rcu_ht *ht, struct rcu_table *t,
379 unsigned long first, unsigned long len)
380 {
381 unsigned long i, end;
382
383 end = first + len;
384 for (i = first; i < end; i++) {
385 /* Update table size when power of two */
386 if (i != 0 && !(i & (i - 1)))
387 t->size = i;
388 t->tbl[i] = calloc(1, sizeof(struct _rcu_ht_node));
389 t->tbl[i]->p.dummy = 1;
390 t->tbl[i]->p.reverse_hash = bit_reverse_ulong(i);
391 (void) _ht_add(ht, t, t->tbl[i], 0, 1);
392 }
393 t->resize_target = t->size = end;
394 t->resize_initiated = 0;
395 }
396
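/*
 * The (i != 0 && !(i & (i - 1))) test above is the usual power-of-two check:
 * a power of two has a single bit set, so clearing its lowest set bit with
 * i & (i - 1) leaves zero.  Because t->size is only bumped at those indices,
 * the dummy node for bucket i is added while the visible size is the largest
 * power of two <= i, which places it after its "parent" dummy (index i with
 * its topmost set bit cleared), exactly where split-ordering expects it.
 */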
397 struct rcu_ht *ht_new(ht_hash_fct hash_fct,
398 ht_compare_fct compare_fct,
399 unsigned long hash_seed,
400 unsigned long init_size,
401 void (*ht_call_rcu)(struct rcu_head *head,
402 void (*func)(struct rcu_head *head)))
403 {
404 struct rcu_ht *ht;
405
406 ht = calloc(1, sizeof(struct rcu_ht));
407 ht->hash_fct = hash_fct;
408 ht->compare_fct = compare_fct;
409 ht->hash_seed = hash_seed;
410 ht->ht_call_rcu = ht_call_rcu;
411 ht->in_progress_resize = 0;
412 /* this mutex should not nest in read-side C.S. */
413 pthread_mutex_init(&ht->resize_mutex, NULL);
414 ht->t = calloc(1, sizeof(struct rcu_table)
415 + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
416 ht->t->size = 0;
417 pthread_mutex_lock(&ht->resize_mutex);
418 init_table(ht, ht->t, 0, max(init_size, 1));
419 pthread_mutex_unlock(&ht->resize_mutex);
420 return ht;
421 }
422
423 struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
424 {
425 struct rcu_table *t;
426 struct rcu_ht_node *node;
427 unsigned long hash, reverse_hash;
428
429 hash = ht->hash_fct(key, key_len, ht->hash_seed);
430 reverse_hash = bit_reverse_ulong(hash);
431
432 t = rcu_dereference(ht->t);
433 node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
434 for (;;) {
435 if (unlikely(!node))
436 break;
437 if (unlikely(node->p.reverse_hash > reverse_hash)) {
438 node = NULL;
439 break;
440 }
441 if (likely(!is_removed(rcu_dereference(node->p.next)))
442 && !node->p.dummy
443 && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
444 break;
445 }
446 node = clear_flag(rcu_dereference(node->p.next));
447 }
448 assert(!node || !node->p.dummy);
449 return node;
450 }
451
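/*
 * ht_lookup() only uses rcu_dereference(), so the caller must hold the RCU
 * read lock and must not touch the returned node after rcu_read_unlock()
 * unless it manages the node's lifetime itself.  A caller-side sketch
 * (function name illustrative only):
 */
static __attribute__((unused))
int example_key_is_present(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_ht_node *node;
	int found;

	rcu_read_lock();
	node = ht_lookup(ht, key, key_len);
	found = (node != NULL);
	rcu_read_unlock();
	return found;
}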
452 void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
453 {
454 struct rcu_table *t;
455 unsigned long hash;
456
457 hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
458 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
459
460 t = rcu_dereference(ht->t);
461 (void) _ht_add(ht, t, node, 0, 0);
462 }
463
464 struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
465 {
466 struct rcu_table *t;
467 unsigned long hash;
468
469 hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
470 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
471
472 t = rcu_dereference(ht->t);
473 return _ht_add(ht, t, node, 1, 0);
474 }
475
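/*
 * ht_add_unique() returns the node it was handed when the key was not yet
 * present, and the pre-existing node otherwise, so callers detect duplicates
 * without a separate lookup.  node->key and node->key_len must be set before
 * the call, and, as with ht_add() and ht_lookup(), the call belongs inside
 * an RCU read-side critical section.  Illustrative caller-side sketch:
 */
static __attribute__((unused))
int example_insert_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_ht_node *ret;

	rcu_read_lock();
	ret = ht_add_unique(ht, node);
	rcu_read_unlock();
	if (ret != node)
		return -EEXIST;	/* key already present, node not inserted */
	return 0;
}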
476 int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
477 {
478 struct rcu_table *t;
479
480 t = rcu_dereference(ht->t);
481 return _ht_remove(ht, t, node);
482 }
483
484 static
485 int ht_delete_dummy(struct rcu_ht *ht)
486 {
487 struct rcu_table *t;
488 struct rcu_ht_node *node;
489 unsigned long i;
490
491 t = ht->t;
492 /* Check that the table is empty */
493 node = t->tbl[0];
494 do {
495 if (!node->p.dummy)
496 return -EPERM;
497 node = node->p.next;
498 assert(!is_removed(node));
499 } while (clear_flag(node));
500 /* Internal sanity check: all nodes left should be dummy */
501 for (i = 0; i < t->size; i++) {
502 assert(t->tbl[i]->p.dummy);
503 free(t->tbl[i]);
504 }
505 return 0;
506 }
507
508 /*
509 * Should only be called when no more concurrent readers nor writers can
510 * possibly access the table.
511 */
512 int ht_destroy(struct rcu_ht *ht)
513 {
514 int ret;
515
516 /* Wait for in-flight resize operations to complete */
517 while (uatomic_read(&ht->in_progress_resize))
518 poll(NULL, 0, 100); /* wait for 100ms */
519 ret = ht_delete_dummy(ht);
520 if (ret)
521 return ret;
522 free(ht->t);
523 free(ht);
524 return ret;
525 }
526
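/*
 * End-to-end caller-side sketch.  The callbacks below follow the shapes with
 * which ht->hash_fct and ht->compare_fct are invoked in this file (key,
 * length, seed / key1, len1, key2, len2); jhash() comes from <urcu/jhash.h>,
 * already included above, and call_rcu()/synchronize_rcu() from liburcu.
 * All names prefixed with example_ are illustrative, not part of the API.
 * (Note that ht_destroy() above calls poll(), which needs <poll.h>.)
 */
static __attribute__((unused))
unsigned long example_hash(void *key, size_t length, unsigned long seed)
{
	return jhash(key, length, seed);
}

static __attribute__((unused))
int example_compare(void *key1, size_t key1_len, void *key2, size_t key2_len)
{
	if (key1_len != key2_len)
		return -1;
	return memcmp(key1, key2, key1_len);
}

static __attribute__((unused))
void example_lifecycle(void)
{
	static char key[] = "foo";
	struct rcu_ht *ht;
	struct rcu_ht_node *node;

	ht = ht_new(example_hash, example_compare, 0x42UL, 8, call_rcu);

	node = calloc(1, sizeof(*node));
	node->key = key;
	node->key_len = sizeof(key);
	rcu_read_lock();
	ht_add(ht, node);
	rcu_read_unlock();

	/* ... concurrent lookups, adds and removals happen here ... */

	rcu_read_lock();
	(void) ht_remove(ht, node);
	rcu_read_unlock();
	synchronize_rcu();	/* let readers drain before freeing the node */
	free(node);
	(void) ht_destroy(ht);	/* no readers nor writers left at this point */
}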
527 void ht_count_nodes(struct rcu_ht *ht,
528 unsigned long *count,
529 unsigned long *removed)
530 {
531 struct rcu_table *t;
532 struct rcu_ht_node *node, *next;
533
534 *count = 0;
535 *removed = 0;
536
537 t = rcu_dereference(ht->t);
538 /* Walk the whole table, counting live and logically removed nodes */
539 node = rcu_dereference(t->tbl[0]);
540 do {
541 next = rcu_dereference(node->p.next);
542 if (is_removed(next)) {
543 assert(!node->p.dummy);
544 (*removed)++;
545 } else if (!node->p.dummy)
546 (*count)++;
547 node = clear_flag(next);
548 } while (node);
549 }
550
551 static
552 void ht_free_table_cb(struct rcu_head *head)
553 {
554 struct rcu_table *t =
555 caa_container_of(head, struct rcu_table, head);
556 free(t);
557 }
558
559 /* called with resize mutex held */
560 static
561 void _do_ht_resize(struct rcu_ht *ht)
562 {
563 unsigned long new_size, old_size;
564 struct rcu_table *new_t, *old_t;
565
566 old_t = ht->t;
567 old_size = old_t->size;
568
569 new_size = CMM_LOAD_SHARED(old_t->resize_target);
570 dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
571 old_size, new_size);
572 if (old_size == new_size)
573 return;
574 new_t = malloc(sizeof(struct rcu_table)
575 + (new_size * sizeof(struct rcu_ht_node *)));
576 assert(new_size > old_size);
577 memcpy(&new_t->tbl, &old_t->tbl,
578 old_size * sizeof(struct rcu_ht_node *));
579 init_table(ht, new_t, old_size, new_size - old_size);
580 /* Changing table and size atomically wrt lookups */
581 rcu_assign_pointer(ht->t, new_t);
582 ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
583 }
584
585 static
586 unsigned long resize_target_update(struct rcu_table *t,
587 int growth_order)
588 {
589 return _uatomic_max(&t->resize_target,
590 t->size << growth_order);
591 }
592
593 void ht_resize(struct rcu_ht *ht, int growth)
594 {
595 struct rcu_table *t = rcu_dereference(ht->t);
596 unsigned long target_size;
597
598 target_size = resize_target_update(t, growth);
599 if (t->size < target_size) {
600 CMM_STORE_SHARED(t->resize_initiated, 1);
601 pthread_mutex_lock(&ht->resize_mutex);
602 _do_ht_resize(ht);
603 pthread_mutex_unlock(&ht->resize_mutex);
604 }
605 }
606
607 static
608 void do_resize_cb(struct rcu_head *head)
609 {
610 struct rcu_resize_work *work =
611 caa_container_of(head, struct rcu_resize_work, head);
612 struct rcu_ht *ht = work->ht;
613
614 pthread_mutex_lock(&ht->resize_mutex);
615 _do_ht_resize(ht);
616 pthread_mutex_unlock(&ht->resize_mutex);
617 free(work);
618 cmm_smp_mb(); /* finish resize before decrement */
619 uatomic_dec(&ht->in_progress_resize);
620 }
621
622 static
623 void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
624 {
625 struct rcu_resize_work *work;
626 unsigned long target_size;
627
628 target_size = resize_target_update(t, growth);
629 if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
630 uatomic_inc(&ht->in_progress_resize);
631 cmm_smp_mb(); /* increment resize count before calling it */
632 work = malloc(sizeof(*work));
633 work->ht = ht;
634 ht->ht_call_rcu(&work->head, do_resize_cb);
635 CMM_STORE_SHARED(t->resize_initiated, 1);
636 }
637 }
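/*
 * The in_progress_resize counter and the two cmm_smp_mb() calls pair up:
 * ht_resize_lazy() increments the counter and issues a full barrier before
 * queueing the RCU callback, while do_resize_cb() issues a full barrier
 * after the resize completes and before decrementing the counter.
 * ht_destroy() can therefore simply poll the counter down to zero and know
 * that no resize callback is still touching the table it is about to free.
 */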