rculfhash test: make teardown more verbose and faster
/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
22
23 #define _LGPL_SOURCE
24 #include <stdlib.h>
25 #include <errno.h>
26 #include <assert.h>
27 #include <stdio.h>
28 #include <stdint.h>
29 #include <string.h>
30
31 #include <urcu.h>
32 #include <urcu-call-rcu.h>
33 #include <urcu/arch.h>
34 #include <urcu/uatomic.h>
35 #include <urcu/jhash.h>
36 #include <urcu/compiler.h>
37 #include <urcu/rculfhash.h>
38 #include <stdio.h>
39 #include <pthread.h>
40
//#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif

#define CHAIN_LEN_TARGET		4
#define CHAIN_LEN_RESIZE_THRESHOLD	8

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

/*
 * The removed flag needs to be updated atomically with the pointer.
 * The dummy flag does not require to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define DUMMY_FLAG		(1UL << 1)
#define FLAGS_MASK		((1UL << 2) - 1)
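
/*
 * Keeping flags in the two low-order pointer bits assumes nodes are
 * aligned on at least 4 bytes, which holds for the calloc()-allocated
 * dummy nodes and for any naturally aligned user-supplied node.
 */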

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct _rcu_ht_node *tbl[0];
};

struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize, in_progress_destroy;
	void (*ht_call_rcu)(struct rcu_head *head,
			void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

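/*
 * Nodes are kept ordered by increasing bit-reversed hash
 * ("split-order"), so growing the table never moves an existing node:
 * a new bucket simply starts pointing into the middle of an existing
 * chain.
 */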
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}

/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}

int get_count_order_u32(uint32_t x)
{
	int order;

	order = fls_u32(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}

int get_count_order_ulong(unsigned long x)
{
	int order;

	order = fls_ulong(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);

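/*
 * Trigger a (deferred) resize once a bucket chain grows past
 * CHAIN_LEN_RESIZE_THRESHOLD. The growth order scales with how far the
 * chain length exceeds CHAIN_LEN_TARGET.
 */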
static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len > 100)
		dbg_printf("rculfhash: WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t,
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}

static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_dummy(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct rcu_ht_node *flag_dummy(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | DUMMY_FLAG);
}

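/*
 * Atomically raise *ptr to v if v is greater than its current value.
 * Returns the resulting maximum of the previous value and v.
 */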
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				return;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
}

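/*
 * Insert "node" in the reverse-hash-ordered chain of its bucket. With
 * "unique" set, return the already-present node on key match instead of
 * inserting; with "dummy" set, insert a bucket (dummy) node. Logically
 * removed nodes found along the way are unlinked before retrying.
 */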
static
struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
			    struct rcu_ht_node *node, int unique, int dummy)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*dummy_node;
	struct _rcu_ht_node *lookup;
	unsigned long hash, index, order;

	if (!t->size) {
		assert(dummy);
		node->p.next = flag_dummy(NULL);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
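		/*
		 * Bucket 0 lives in tbl[0]; bucket i (i >= 1) lives in
		 * tbl[floor(log2(i)) + 1], at offset i with its top bit
		 * stripped: i & ((1UL << (order - 1)) - 1).
		 */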
		index = hash & (t->size - 1);
		order = get_count_order_ulong(index + 1);
		lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
		iter_prev = (struct rcu_ht_node *) lookup;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				goto insert;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				goto gc_node;
			if (unique
			    && !is_dummy(next)
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
			    && !is_dummy(next))
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dummy_node = (struct rcu_ht_node *) lookup;
	_ht_gc_bucket(dummy_node, node);
	return node;
}

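/*
 * Removal happens in two steps: first flag the node's next pointer with
 * REMOVED_FLAG (logical removal), then let the bucket garbage collector
 * physically unlink it. Only the thread whose cmpxchg sets the flag
 * reports success; concurrent removers of the same node get -ENOENT.
 */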
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *dummy, *next, *old;
	struct _rcu_ht_node *lookup;
	int flagged = 0;
	unsigned long hash, index, order;

	/* logically delete the node */
	old = rcu_dereference(node->p.next);
	do {
		next = old;
		if (is_removed(next))
			goto end;
		assert(!is_dummy(next));
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dummy = (struct rcu_ht_node *) lookup;
	_ht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else
		return -ENOENT;
}

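/*
 * Populate the dummy (bucket) nodes for table orders [first_order,
 * first_order + len_order). Order 0 holds one bucket; order i holds
 * 2^(i-1) buckets. Growing the table therefore only allocates new
 * orders and links their dummy nodes in; existing buckets never move.
 */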
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first_order, unsigned long len_order)
{
	unsigned long i, end_order;

	dbg_printf("rculfhash: init table: first_order %lu end_order %lu\n",
		   first_order, first_order + len_order);
	end_order = first_order + len_order;
	t->size = !first_order ? 0 : (1UL << (first_order - 1));
	for (i = first_order; i < end_order; i++) {
		unsigned long j, len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("rculfhash: init order %lu len: %lu\n", i, len);
		t->tbl[i] = calloc(len, sizeof(struct _rcu_ht_node));
		for (j = 0; j < len; j++) {
			dbg_printf("rculfhash: init entry: i %lu j %lu hash %lu\n",
				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
			struct rcu_ht_node *new_node =
				(struct rcu_ht_node *) &t->tbl[i][j];
			new_node->p.reverse_hash =
				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
			(void) _ht_add(ht, t, new_node, 0, 1);
			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
				break;
		}
		/* Update table size */
		t->size = !i ? 1 : (1UL << i);
		dbg_printf("rculfhash: init new size: %lu\n", t->size);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
	t->resize_target = t->size;
	t->resize_initiated = 0;
}

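/*
 * Create a hash table. The initial size is the power of two at or above
 * max(init_size, 1). ht_call_rcu lets the caller pick the call_rcu
 * flavor used for deferred resize and table reclaim.
 */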
struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;
	unsigned long order;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	ht->in_progress_resize = 0;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = get_count_order_ulong(max(init_size, 1)) + 1;
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (order * sizeof(struct _rcu_ht_node *)));
	ht->t->size = 0;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, order);
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}

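/*
 * Walk the bucket chain in split-order, skipping dummy and logically
 * removed nodes. Must be called from within an RCU read-side critical
 * section.
 */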
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;
	struct _rcu_ht_node *lookup;
	unsigned long hash, reverse_hash, index, order;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dbg_printf("rculfhash: lookup hash %lu index %lu order %lu aridx %lu\n",
		   hash, index, order, index & ((1UL << (order - 1)) - 1));
	node = (struct rcu_ht_node *) lookup;
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}

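/*
 * Example reader (a sketch; "use_node" stands in for application code,
 * and the hash/compare functions are assumed to have been registered
 * through ht_new):
 *
 *	rcu_read_lock();
 *	node = ht_lookup(ht, key, key_len);
 *	if (node)
 *		use_node(node);
 *	rcu_read_unlock();
 */
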
void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _ht_add(ht, t, node, 0, 0);
}

struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	return _ht_add(ht, t, node, 1, 0);
}

int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}

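/*
 * Free all dummy (bucket) nodes. Returns -EPERM if the table still
 * contains user nodes.
 */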
static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	struct _rcu_ht_node *lookup;
	unsigned long order, i;

	t = ht->t;
	/* Check that the table is empty */
	lookup = &t->tbl[0][0];
	node = (struct rcu_ht_node *) lookup;
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (clear_flag(node));
	/* Internal sanity check: all nodes left should be dummy */
	for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
		unsigned long len;

		len = !order ? 1 : 1UL << (order - 1);
		for (i = 0; i < len; i++) {
			dbg_printf("rculfhash: delete order %lu i %lu hash %lu\n",
				   order, i,
				   bit_reverse_ulong(t->tbl[order][i].reverse_hash));
			assert(is_dummy(t->tbl[order][i].next));
		}
		free(t->tbl[order]);
	}
	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

void ht_count_nodes(struct rcu_ht *ht,
		    unsigned long *count,
		    unsigned long *removed)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;
	struct _rcu_ht_node *lookup;
	unsigned long nr_dummy = 0;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Count non-dummy nodes in the table */
	lookup = &t->tbl[0][0];
	node = (struct rcu_ht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			(nr_dummy)++;
		node = clear_flag(next);
	} while (node);
	dbg_printf("rculfhash: number of dummy nodes: %lu\n", nr_dummy);
}

static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size, old_order, new_order;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;
	old_order = get_count_order_ulong(old_size) + 1;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	if (old_size == new_size)
		return;
	new_order = get_count_order_ulong(new_size) + 1;
	dbg_printf("rculfhash: resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	new_t = malloc(sizeof(struct rcu_table)
		       + (new_order * sizeof(struct _rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_order * sizeof(struct _rcu_ht_node *));
	init_table(ht, new_t, old_order, new_order - old_order);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}

static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}

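/*
 * Defer the resize to a call_rcu worker so the updater fast path never
 * blocks on the resize mutex. in_progress_resize keeps ht_destroy()
 * from tearing the table down while a resize callback is pending.
 */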
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}