/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical sections allow readers to perform hash
 *   table lookups and use the returned objects safely, by delaying
 *   memory reclamation until after a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within an RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks
 *   whether the node key already exists in the hash table, ensuring
 *   that no duplicate key exists.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of dummy nodes is kept. These dummy nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation for small tables only allows expanding the hash table.
 *   It is triggered automatically by detecting long chains in the add
 *   operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Per-CPU split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - A resize operation initiated by long chain detection is executed by
 *   a call_rcu thread, which keeps the add and remove operations
 *   lock-free.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. Only the thread which successfully
 *   set the "removed" flag (with a cmpxchg) into a node's next pointer
 *   is considered to have succeeded its removal (and thus owns the node
 *   to reclaim). Because we garbage-collect starting from an invariant
 *   node (the start-of-bucket dummy node) up to the "removed" node (or
 *   until we find a higher reverse-hash), we are sure that a successful
 *   traversal of the chain leads to a chain that is present in the
 *   linked-list (the start node is never removed) and that it does not
 *   contain the "removed" node anymore, even if concurrent delete/add
 *   operations are changing the structure of the list concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with removed flag set in the bucket where it wants
 *   to add its new node. This ensures lock-freedom of the add operation
 *   by helping the remover unlink nodes from the list rather than
 *   waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "dummy node" tables.
 * - There is one dummy node table per hash index order. The size of
 *   each dummy node table is half the number of hashes contained in
 *   this order (except for order 0).
 * - synchronize_rcu is used to garbage-collect the old dummy node table.
 * - The per-order dummy node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
 *
 * Dummy node tables:
 *
 * hash table	hash table	the last	all dummy node tables
 * order	size		dummy node	0   1   2   3   4   5   6(index)
 *				table size
 * 0		1		1		1
 * 1		2		1		1   1
 * 2		4		2		1   1   2
 * 3		8		4		1   1   2   4
 * 4		16		8		1   1   2   4   8
 * 5		32		16		1   1   2   4   8  16
 * 6		64		32		1   1   2   4   8  16  32
 *
 * When growing/shrinking, we only focus on the last dummy node table,
 * whose size is (!order ? 1 : (1 << (order - 1))).
 *
 * Example for growing/shrinking:
 * grow hash table from order 5 to 6: init the index=6 dummy node table
 * shrink hash table from order 6 to 5: fini the index=6 dummy node table
 *
 * A bit of ascii art explanation:
 *
 * The order index is off by one compared to the actual power of 2,
 * because we use index 0 to deal with the 0 special-case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 * 0  000        000
 * 4  100        001
 * 2  010        010
 * 6  110        011
 * 1  001        100
 * 5  101        101
 * 3  011        110
 * 7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order.
 *
 * order              bits       reverse
 * 0               0  000        000
 * 1               |  1  001        100             <-
 * 2               |  |  2  010        010    <-     |
 *                 |  |  |  3  011        110  | <-  |
 * 3               -> |  |  |  4  100        001  |  |
 *                    -> |  |     5  101        101  |
 *                       -> |        6  110        011
 *                          ->          7  111        111
 */
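
/*
 * Illustrative usage sketch (not part of the library build): a node is
 * embedded in a user structure, and all API calls below run under the
 * RCU read-side lock, as the algorithm above requires. This assumes the
 * default urcu flavor (rcu_read_lock/rcu_read_unlock) and an "ht"
 * created with matching flavor callbacks; error handling is elided:
 *
 *	struct mynode {
 *		int value;			// key storage
 *		struct cds_lfht_node node;	// chaining in hash table
 *	};
 *
 *	struct mynode *n = malloc(sizeof(*n));
 *	n->value = 42;
 *	n->node.key = &n->value;		// key/key_len fields are
 *	n->node.key_len = sizeof(n->value);	// read by the hash function
 *
 *	rcu_read_lock();
 *	cds_lfht_add(ht, &n->node);
 *	rcu_read_unlock();
 *
 *	struct cds_lfht_iter iter;
 *	rcu_read_lock();
 *	cds_lfht_lookup(ht, &n->value, sizeof(n->value), &iter);
 *	if (iter.node)
 *		n = caa_container_of(iter.node, struct mynode, node);
 *	rcu_read_unlock();
 */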

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <pthread.h>
#include <sched.h>	/* sched_getcpu() */
#include <unistd.h>	/* sysconf() */
#include <poll.h>	/* poll() in cds_lfht_destroy() */

#include "config.h"
#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>

#ifdef DEBUG
#define dbg_printf(fmt, args...)	printf("[debug rculfhash] " fmt, ## args)
#else
#define dbg_printf(fmt, args...)
#endif

/*
 * Per-CPU split-counters lazily update the global counter every 1024
 * additions/removals. They automatically keep track of whether a resize
 * is required.
 * We use the bucket chain length as the expand indicator for small
 * tables and for machines lacking per-cpu data support.
 */
#define COUNT_COMMIT_ORDER		10
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3
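
/*
 * Worked example of the thresholds above (illustrative only): with
 * COUNT_COMMIT_ORDER = 10, each per-CPU counter folds its contribution
 * into the global ht->count every 1024 local additions or removals.
 * When the committed global count reaches a power of two, a resize is
 * considered: with CHAIN_LEN_RESIZE_THRESHOLD = 3, a count of 8192
 * against a table of size 1024 gives 8192 >> 3 = 1024 >= size, so the
 * table is grown towards count >> (CHAIN_LEN_TARGET - 1) = 8192
 * buckets (see ht_count_add() below).
 */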

/*
 * Define the minimum table size.
 */
#define MIN_TABLE_SIZE			1

#if (CAA_BITS_PER_LONG == 32)
#define MAX_TABLE_ORDER			32
#else
#define MAX_TABLE_ORDER			64
#endif

/*
 * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
 */
#define MIN_PARTITION_PER_THREAD_ORDER	12
#define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)

#ifndef min
#define min(a, b)	((a) < (b) ? (a) : (b))
#endif

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

/*
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no new node must attach to the node scheduled for
 * removal, and that node garbage collection must be performed.
 * The dummy flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define DUMMY_FLAG		(1UL << 1)
#define FLAGS_MASK		((1UL << 2) - 1)

/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE		NULL
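
/*
 * A minimal sketch of how the low-bit flags work (illustrative only):
 * nodes are at least 4-byte aligned, so the two low-order bits of a
 * next pointer are always zero and can carry REMOVED_FLAG/DUMMY_FLAG:
 *
 *	struct cds_lfht_node *p = ...;	// aligned: low 2 bits are 0
 *	struct cds_lfht_node *tagged =
 *		(struct cds_lfht_node *) ((unsigned long) p | REMOVED_FLAG);
 *	// ((unsigned long) tagged & REMOVED_FLAG) is non-zero, and
 *	// ((unsigned long) tagged & ~FLAGS_MASK) recovers p.
 *
 * The helpers is_removed(), flag_removed(), is_dummy(), flag_dummy()
 * and clear_flag() below implement exactly this arithmetic.
 */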

struct ht_items_count {
	unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct rcu_level {
	/* Note: manually update allocation length when adding a field */
	struct _cds_lfht_node nodes[0];
};

struct rcu_table {
	unsigned long size;	/* always a power of 2, shared (RCU) */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_level *tbl[MAX_TABLE_ORDER];
};

struct cds_lfht {
	struct rcu_table t;
	cds_lfht_hash_fct hash_fct;
	cds_lfht_compare_fct compare_fct;
	unsigned long hash_seed;
	int flags;
	/*
	 * We need to put the work threads offline (QSBR) when taking this
	 * mutex, because we use synchronize_rcu within this mutex critical
	 * section, which waits on read-side critical sections, and could
	 * therefore cause grace-period deadlock if we hold off RCU G.P.
	 * completion.
	 */
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize, in_progress_destroy;
	void (*cds_lfht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
	void (*cds_lfht_synchronize_rcu)(void);
	void (*cds_lfht_rcu_read_lock)(void);
	void (*cds_lfht_rcu_read_unlock)(void);
	void (*cds_lfht_rcu_thread_offline)(void);
	void (*cds_lfht_rcu_thread_online)(void);
	void (*cds_lfht_rcu_register_thread)(void);
	void (*cds_lfht_rcu_unregister_thread)(void);
	pthread_attr_t *resize_attr;	/* Resize threads attributes */
	long count;			/* global approximate item count */
	struct ht_items_count *percpu_count;	/* per-cpu item count */
};

struct rcu_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
};

struct partition_resize_work {
	pthread_t thread_id;
	struct cds_lfht *ht;
	unsigned long i, start, len;
	void (*fct)(struct cds_lfht *ht, unsigned long i,
		    unsigned long start, unsigned long len);
};

static
void _cds_lfht_add(struct cds_lfht *ht,
		unsigned long size,
		struct cds_lfht_node *node,
		struct cds_lfht_iter *unique_ret,
		int dummy);

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
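
/*
 * Worked examples of the byte-wise reversal (illustrative only):
 * bit_reverse_u8(0x01) == 0x80, bit_reverse_u8(0x0F) == 0xF0, and on a
 * 64-bit target bit_reverse_ulong(1) == 1UL << 63. Keeping the
 * split-ordered list sorted by reversed hash bits is what lets buckets
 * split in place: when the table doubles from size 2^n to 2^(n+1), the
 * nodes of bucket k stay contiguous and are simply split between
 * buckets k and k + 2^n.
 */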

/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int get_count_order_u32(uint32_t x)
{
	if (!x)
		return -1;

	return fls_u32(x - 1);
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int get_count_order_ulong(unsigned long x)
{
	if (!x)
		return -1;

	return fls_ulong(x - 1);
}
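
/*
 * Worked examples (illustrative only): fls_u32(0) == 0, fls_u32(1) == 1,
 * fls_u32(8) == 4. Consequently get_count_order_ulong(8) == fls(7) == 3
 * (8 <= 1UL << 3), while get_count_order_ulong(9) == fls(8) == 4, i.e.
 * the next power of two, 16.
 */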

#ifdef POISON_FREE
#define poison_free(ptr)				\
	do {						\
		memset(ptr, 0x42, sizeof(*(ptr)));	\
		free(ptr);				\
	} while (0)
#else
#define poison_free(ptr)	free(ptr)
#endif

static
void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we support hash table item accounting.
 * In the unfortunate event that the number of CPUs reported is
 * inaccurate, we use modulo arithmetic on the number of CPUs we got.
 */
#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count);

static long nr_cpus_mask = -1;

static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
	struct ht_items_count *count;

	switch (nr_cpus_mask) {
	case -2:
		return NULL;
	case -1:
	{
		long maxcpus;

		maxcpus = sysconf(_SC_NPROCESSORS_CONF);
		if (maxcpus <= 0) {
			nr_cpus_mask = -2;
			return NULL;
		}
		/*
		 * round up number of CPUs to next power of two, so we
		 * can use & for modulo.
		 */
		maxcpus = 1UL << get_count_order_ulong(maxcpus);
		nr_cpus_mask = maxcpus - 1;
	}
		/* Fall-through */
	default:
		return calloc(nr_cpus_mask + 1, sizeof(*count));
	}
}

static
void free_per_cpu_items_count(struct ht_items_count *count)
{
	poison_free(count);
}

static
int ht_get_cpu(void)
{
	int cpu;

	assert(nr_cpus_mask >= 0);
	cpu = sched_getcpu();
	if (unlikely(cpu < 0))
		return cpu;
	else
		return cpu & nr_cpus_mask;
}

static
void ht_count_add(struct cds_lfht *ht, unsigned long size)
{
	unsigned long percpu_count;
	int cpu;

	if (unlikely(!ht->percpu_count))
		return;
	cpu = ht_get_cpu();
	if (unlikely(cpu < 0))
		return;
	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
		long count;

		dbg_printf("add percpu %lu\n", percpu_count);
		count = uatomic_add_return(&ht->count,
				1UL << COUNT_COMMIT_ORDER);
		/* If power of 2 */
		if (!(count & (count - 1))) {
			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
				return;
			dbg_printf("add set global %ld\n", count);
			cds_lfht_resize_lazy_count(ht, size,
				count >> (CHAIN_LEN_TARGET - 1));
		}
	}
}

static
void ht_count_del(struct cds_lfht *ht, unsigned long size)
{
	unsigned long percpu_count;
	int cpu;

	if (unlikely(!ht->percpu_count))
		return;
	cpu = ht_get_cpu();
	if (unlikely(cpu < 0))
		return;
	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, 1);
	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
		long count;

		dbg_printf("del percpu %lu\n", percpu_count);
		count = uatomic_add_return(&ht->count,
				-(1UL << COUNT_COMMIT_ORDER));
		/* If power of 2 */
		if (!(count & (count - 1))) {
			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
				return;
			dbg_printf("del set global %ld\n", count);
			/*
			 * Don't shrink table if the number of nodes is below a
			 * certain threshold.
			 */
			if (count < (1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1))
				return;
			cds_lfht_resize_lazy_count(ht, size,
				count >> (CHAIN_LEN_TARGET - 1));
		}
	}
}

#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

static const long nr_cpus_mask = -2;

static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
	return NULL;
}

static
void free_per_cpu_items_count(struct ht_items_count *count)
{
}

static
void ht_count_add(struct cds_lfht *ht, unsigned long size)
{
}

static
void ht_count_del(struct cds_lfht *ht, unsigned long size)
{
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

static
void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
{
	unsigned long count;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = uatomic_read(&ht->count);
	/*
	 * Use bucket-local length for small table expand and for
	 * environments lacking per-cpu data support.
	 */
	if (count >= (1UL << COUNT_COMMIT_ORDER))
		return;
	if (chain_len > 100)
		dbg_printf("WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		cds_lfht_resize_lazy(ht, size,
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}

static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_dummy(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}

static
struct cds_lfht_node *get_end(void)
{
	return (struct cds_lfht_node *) END_VALUE;
}

static
int is_end(struct cds_lfht_node *node)
{
	return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
}

static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}
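
/*
 * _uatomic_max() semantics, by example (illustrative only): it
 * atomically raises *ptr to at least v and returns the resulting
 * maximum. With *ptr == 8, calling it with v == 4 stores nothing and
 * returns 8; with v == 16 it stores 16 (retrying if *ptr moved
 * concurrently) and returns 16. resize_target_update() below relies on
 * this to only ever grow the resize target.
 */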

static
struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
		unsigned long hash)
{
	unsigned long index, order;

	assert(size > 0);
	index = hash & (size - 1);
	/*
	 * equivalent to get_count_order_ulong(index + 1), but optimizes
	 * away the non-existing 0 special-case for
	 * get_count_order_ulong.
	 */
	order = fls_ulong(index);

	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
		   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));

	return &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
}
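
/*
 * Worked example of the bucket lookup (illustrative only): with
 * size == 8 and a hash whose index bits are 5, order = fls_ulong(5) = 3
 * and the slot is 5 & ((1UL << 2) - 1) = 1, so the dummy node is
 * ht->t.tbl[3]->nodes[1]: the order-3 table holds hash indexes 4..7 in
 * its 4 slots. Index 0 maps to order 0, slot 0: ht->t.tbl[0]->nodes[0].
 */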

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;

	assert(!is_dummy(dummy));
	assert(!is_removed(dummy));
	assert(!is_dummy(node));
	assert(!is_removed(node));
	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(!is_removed(iter));
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		/*
		 * We should never be called with dummy (start of chain)
		 * and logically removed node (end of path compression
		 * marker) being the actual same node. This would be a
		 * bug in the algorithm implementation.
		 */
		assert(dummy != node);
		for (;;) {
			if (unlikely(is_end(iter)))
				return;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (likely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
	return;
}

static
int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *old_node,
		struct cds_lfht_node *old_next,
		struct cds_lfht_node *new_node)
{
	struct cds_lfht_node *dummy, *ret_next;
	struct _cds_lfht_node *lookup;

	if (!old_node)	/* Return -ENOENT if asked to replace NULL node */
		return -ENOENT;

	assert(!is_removed(old_node));
	assert(!is_dummy(old_node));
	assert(!is_removed(new_node));
	assert(!is_dummy(new_node));
	assert(new_node != old_node);
	for (;;) {
		/* Insert after node to be replaced */
		if (is_removed(old_next)) {
			/*
			 * Too late, the old node has been removed under us
			 * between lookup and replace. Fail.
			 */
			return -ENOENT;
		}
		assert(!is_dummy(old_next));
		assert(new_node != clear_flag(old_next));
		new_node->p.next = clear_flag(old_next);
		/*
		 * Here is the whole trick for lock-free replace: we add
		 * the replacement node _after_ the node we want to
		 * replace by atomically setting its next pointer at the
		 * same time we set its removal flag. Given that
		 * the lookups/get next use an iterator aware of the
		 * next pointer, they will either skip the old node due
		 * to the removal flag and see the new node, or use
		 * the old node, but will not see the new one.
		 */
		ret_next = uatomic_cmpxchg(&old_node->p.next,
			      old_next, flag_removed(new_node));
		if (ret_next == old_next)
			break;	/* We performed the replacement. */
		old_next = ret_next;
	}

	/*
	 * Ensure that the old node is not visible to readers anymore:
	 * lookup for the node, and remove it (along with any other
	 * logically removed node) if found.
	 */
	lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
	dummy = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy, new_node);

	assert(is_removed(rcu_dereference(old_node->p.next)));
	return 0;
}

/*
 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
 * mode. A NULL unique_ret allows creation of duplicate keys.
 */
static
void _cds_lfht_add(struct cds_lfht *ht,
		unsigned long size,
		struct cds_lfht_node *node,
		struct cds_lfht_iter *unique_ret,
		int dummy)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*return_node;
	struct _cds_lfht_node *lookup;

	assert(!is_dummy(node));
	assert(!is_removed(node));
	if (!size) {
		assert(dummy);
		assert(!unique_ret);
		node->p.next = flag_dummy(get_end());
		return;		/* Initial first add (head) */
	}
	lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = (struct cds_lfht_node *) lookup;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(is_end(iter)))
				goto insert;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				goto insert;
			/* dummy node is the first node of the identical-hash-value chain */
			if (dummy && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash)
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (unlikely(is_removed(next)))
				goto gc_node;
			if (unique_ret
			    && !is_dummy(next)
			    && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len)) {
				unique_ret->node = clear_flag(iter);
				unique_ret->next = next;
				return;
			}
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
			    && !is_dummy(next))
				check_resize(ht, size, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}

	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removed(iter));
		assert(iter_prev != node);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter) {
			continue;	/* retry */
		} else {
			return_node = node;
			goto end;
		}

	gc_node:
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
end:
	if (unique_ret) {
		unique_ret->node = return_node;
		/* unique_ret->next left unset, never used. */
	}
}

static
int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *node,
		int dummy_removal)
{
	struct cds_lfht_node *dummy, *next, *old;
	struct _cds_lfht_node *lookup;

	if (!node)	/* Return -ENOENT if asked to delete NULL node */
		return -ENOENT;

	/* logically delete the node */
	assert(!is_dummy(node));
	assert(!is_removed(node));
	old = rcu_dereference(node->p.next);
	do {
		struct cds_lfht_node *new_next;

		next = old;
		if (unlikely(is_removed(next)))
			return -ENOENT;
		if (dummy_removal)
			assert(is_dummy(next));
		else
			assert(!is_dummy(next));
		new_next = flag_removed(next);
		old = uatomic_cmpxchg(&node->p.next, next, new_next);
	} while (old != next);
	/* We performed the (logical) deletion. */

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
	dummy = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy, node);

	assert(is_removed(rcu_dereference(node->p.next)));
	return 0;
}

static
void *partition_resize_thread(void *arg)
{
	struct partition_resize_work *work = arg;

	work->ht->cds_lfht_rcu_register_thread();
	work->fct(work->ht, work->i, work->start, work->len);
	work->ht->cds_lfht_rcu_unregister_thread();
	return NULL;
}

static
void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
		unsigned long len,
		void (*fct)(struct cds_lfht *ht, unsigned long i,
			unsigned long start, unsigned long len))
{
	unsigned long partition_len;
	struct partition_resize_work *work;
	int thread, ret;
	unsigned long nr_threads;

	/*
	 * Note: nr_cpus_mask + 1 is always a power of 2.
	 * We spawn just the number of threads we need to satisfy the minimum
	 * partition size, up to the number of CPUs in the system.
	 */
	if (nr_cpus_mask > 0) {
		nr_threads = min(nr_cpus_mask + 1,
				 len >> MIN_PARTITION_PER_THREAD_ORDER);
	} else {
		nr_threads = 1;
	}
	partition_len = len >> get_count_order_ulong(nr_threads);
	work = calloc(nr_threads, sizeof(*work));
	assert(work);
	for (thread = 0; thread < nr_threads; thread++) {
		work[thread].ht = ht;
		work[thread].i = i;
		work[thread].len = partition_len;
		work[thread].start = thread * partition_len;
		work[thread].fct = fct;
		ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
				     partition_resize_thread, &work[thread]);
		assert(!ret);
	}
	for (thread = 0; thread < nr_threads; thread++) {
		ret = pthread_join(work[thread].thread_id, NULL);
		assert(!ret);
	}
	free(work);
}
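
/*
 * Partitioning example (illustrative only), assuming 8 CPUs, i.e.
 * nr_cpus_mask == 7: for len == 1UL << 20 dummy nodes,
 * len >> MIN_PARTITION_PER_THREAD_ORDER == 256, so
 * nr_threads = min(8, 256) = 8 and each thread covers a partition of
 * 1UL << 17 nodes. For len == 1UL << 13, only min(8, 2) = 2 threads
 * are spawned, each covering 1UL << 12 nodes.
 */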

/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 *
 * When we reach a certain length, we can split this population phase over
 * many worker threads, based on the number of CPUs available in the system.
 * This should therefore keep the expand operation from lagging behind too
 * many concurrent insertion threads, by using the scheduler's ability to
 * schedule dummy node population fairly with insertions.
 */
static
void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
				   unsigned long start, unsigned long len)
{
	unsigned long j;

	ht->cds_lfht_rcu_read_lock();
	for (j = start; j < start + len; j++) {
		struct cds_lfht_node *new_node =
			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];

		dbg_printf("init populate: i %lu j %lu hash %lu\n",
			   i, j, !i ? 0 : (1UL << (i - 1)) + j);
		new_node->p.reverse_hash =
			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
		_cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
			      new_node, NULL, 1);
	}
	ht->cds_lfht_rcu_read_unlock();
}

static
void init_table_populate(struct cds_lfht *ht, unsigned long i,
			 unsigned long len)
{
	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->cds_lfht_rcu_thread_online();
		init_table_populate_partition(ht, i, 0, len);
		ht->cds_lfht_rcu_thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, init_table_populate_partition);
}

static
void init_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long last_order)
{
	unsigned long i;

	dbg_printf("init table: first_order %lu last_order %lu\n",
		   first_order, last_order);
	for (i = first_order; i <= last_order; i++) {
		unsigned long len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("init order %lu len: %lu\n", i, len);

		/* Stop expand if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
			break;

		ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
		assert(ht->t.tbl[i]);

		/*
		 * Set all dummy nodes reverse hash values for a level and
		 * link all dummy nodes into the table.
		 */
		init_table_populate(ht, i, len);

		/*
		 * Update table size.
		 */
		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));

		dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
}

/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 * For a single level, we logically remove and garbage collect each node.
 *
 * As a design choice, we perform logical removal and garbage collection on a
 * node-per-node basis to simplify this algorithm. We also assume keeping good
 * cache locality of the operation would outweigh the possible performance gain
 * that could be achieved by batching garbage collection for multiple levels.
 * However, this would have to be justified by benchmarks.
 *
 * Concurrent removal and add operations are helping us perform garbage
 * collection of logically removed nodes. We guarantee that all logically
 * removed nodes have been garbage-collected (unlinked) before call_rcu is
 * invoked to free a whole level of dummy nodes (after a grace period).
 *
 * Logical removal and garbage collection can therefore be done in batch or on a
 * node-per-node basis, as long as the guarantee above holds.
 *
 * When we reach a certain length, we can split this removal over many worker
 * threads, based on the number of CPUs available in the system. This should
 * take care of not letting the resize process lag behind too many concurrent
 * updater threads actively inserting into the hash table.
 */
static
void remove_table_partition(struct cds_lfht *ht, unsigned long i,
			    unsigned long start, unsigned long len)
{
	unsigned long j;

	ht->cds_lfht_rcu_read_lock();
	for (j = start; j < start + len; j++) {
		struct cds_lfht_node *fini_node =
			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];

		dbg_printf("remove entry: i %lu j %lu hash %lu\n",
			   i, j, !i ? 0 : (1UL << (i - 1)) + j);
		fini_node->p.reverse_hash =
			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
		(void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
				     fini_node, 1);
	}
	ht->cds_lfht_rcu_read_unlock();
}

static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
{
	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->cds_lfht_rcu_thread_online();
		remove_table_partition(ht, i, 0, len);
		ht->cds_lfht_rcu_thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, remove_table_partition);
}

static
void fini_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long last_order)
{
	long i;
	void *free_by_rcu = NULL;

	dbg_printf("fini table: first_order %lu last_order %lu\n",
		   first_order, last_order);
	assert(first_order > 0);
	for (i = last_order; i >= first_order; i--) {
		unsigned long len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("fini order %lu len: %lu\n", i, len);

		/* Stop shrink if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
			break;

		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));

		/*
		 * We need to wait for all add operations to reach Q.S. (and
		 * thus use the new table for lookups) before we can start
		 * releasing the old dummy nodes. Otherwise their lookup will
		 * return a logically removed node as insert position.
		 */
		ht->cds_lfht_synchronize_rcu();
		if (free_by_rcu)
			free(free_by_rcu);

		/*
		 * Set "removed" flag in dummy nodes about to be removed.
		 * Unlink all now-logically-removed dummy node pointers.
		 * Concurrent add/remove operations are helping us with
		 * the gc.
		 */
		remove_table(ht, i, len);

		free_by_rcu = ht->t.tbl[i];

		dbg_printf("fini new size: %lu\n", 1UL << (i - 1));
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}

	if (free_by_rcu) {
		ht->cds_lfht_synchronize_rcu();
		free(free_by_rcu);
	}
}

struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
			cds_lfht_compare_fct compare_fct,
			unsigned long hash_seed,
			unsigned long init_size,
			int flags,
			void (*cds_lfht_call_rcu)(struct rcu_head *head,
					void (*func)(struct rcu_head *head)),
			void (*cds_lfht_synchronize_rcu)(void),
			void (*cds_lfht_rcu_read_lock)(void),
			void (*cds_lfht_rcu_read_unlock)(void),
			void (*cds_lfht_rcu_thread_offline)(void),
			void (*cds_lfht_rcu_thread_online)(void),
			void (*cds_lfht_rcu_register_thread)(void),
			void (*cds_lfht_rcu_unregister_thread)(void),
			pthread_attr_t *attr)
{
	struct cds_lfht *ht;
	unsigned long order;

	/* init_size must be power of two */
	if (init_size && (init_size & (init_size - 1)))
		return NULL;
	ht = calloc(1, sizeof(struct cds_lfht));
	assert(ht);
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
	ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
	ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
	ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
	ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
	ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
	ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
	ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
	ht->resize_attr = attr;
	ht->percpu_count = alloc_per_cpu_items_count();
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE));
	ht->flags = flags;
	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	ht->t.resize_target = 1UL << order;
	init_table(ht, 0, order);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
	return ht;
}

void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
		struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next, *dummy_node;
	struct _cds_lfht_node *lookup;
	unsigned long hash, reverse_hash, size;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	size = rcu_dereference(ht->t.size);
	lookup = lookup_bucket(ht, size, hash);
	dummy_node = (struct cds_lfht_node *) lookup;
	/* We can always skip the dummy node initially */
	node = rcu_dereference(dummy_node->p.next);
	node = clear_flag(node);
	for (;;) {
		if (unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && clear_flag(node)->p.reverse_hash == reverse_hash
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	iter->node = node;
	iter->next = next;
}

void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next;
	unsigned long reverse_hash;
	void *key;
	size_t key_len;

	node = iter->node;
	reverse_hash = node->p.reverse_hash;
	key = node->key;
	key_len = node->key_len;
	next = iter->next;
	node = clear_flag(next);

	for (;;) {
		if (unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	iter->node = node;
	iter->next = next;
}

void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next;

	node = clear_flag(iter->next);
	for (;;) {
		if (unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	iter->node = node;
	iter->next = next;
}

void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	struct _cds_lfht_node *lookup;

	/*
	 * Get next after first dummy node. The first dummy node is the
	 * first node of the linked list.
	 */
	lookup = &ht->t.tbl[0]->nodes[0];
	iter->next = lookup->next;
	cds_lfht_next(ht, iter);
}

void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	unsigned long hash, size;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	size = rcu_dereference(ht->t.size);
	_cds_lfht_add(ht, size, node, NULL, 0);
	ht_count_add(ht, size);
}

struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	unsigned long hash, size;
	struct cds_lfht_iter iter;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	size = rcu_dereference(ht->t.size);
	_cds_lfht_add(ht, size, node, &iter, 0);
	if (iter.node == node)
		ht_count_add(ht, size);
	return iter.node;
}

struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	unsigned long hash, size;
	struct cds_lfht_iter iter;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	size = rcu_dereference(ht->t.size);
	for (;;) {
		_cds_lfht_add(ht, size, node, &iter, 0);
		if (iter.node == node) {
			ht_count_add(ht, size);
			return NULL;
		}

		if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node))
			return iter.node;
	}
}

int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
		struct cds_lfht_node *new_node)
{
	unsigned long size;

	size = rcu_dereference(ht->t.size);
	return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
			new_node);
}

int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	unsigned long size;
	int ret;

	size = rcu_dereference(ht->t.size);
	ret = _cds_lfht_del(ht, size, iter->node, 0);
	if (!ret)
		ht_count_del(ht, size);
	return ret;
}

static
int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
	struct cds_lfht_node *node;
	struct _cds_lfht_node *lookup;
	unsigned long order, i, size;

	/* Check that the table is empty */
	lookup = &ht->t.tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (!is_end(node));
	/*
	 * size accessed without rcu_dereference because hash table is
	 * being destroyed.
	 */
	size = ht->t.size;
	/* Internal sanity check: all nodes left should be dummy */
	for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
		unsigned long len;

		len = !order ? 1 : 1UL << (order - 1);
		for (i = 0; i < len; i++) {
			dbg_printf("delete order %lu i %lu hash %lu\n",
				   order, i,
				   bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
			assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
		}
		poison_free(ht->t.tbl[order]);
	}
	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	cmm_smp_mb();	/* Store destroy before load resize */
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = cds_lfht_delete_dummy(ht);
	if (ret)
		return ret;
	free_per_cpu_items_count(ht->percpu_count);
	if (attr)
		*attr = ht->resize_attr;
	poison_free(ht);
	return ret;
}

void cds_lfht_count_nodes(struct cds_lfht *ht,
		long *approx_before,
		unsigned long *count,
		unsigned long *removed,
		long *approx_after)
{
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long nr_dummy = 0;

	*approx_before = 0;
	if (nr_cpus_mask >= 0) {
		int i;

		for (i = 0; i < nr_cpus_mask + 1; i++) {
			*approx_before += uatomic_read(&ht->percpu_count[i].add);
			*approx_before -= uatomic_read(&ht->percpu_count[i].del);
		}
	}

	*count = 0;
	*removed = 0;

	/* Count non-dummy nodes in the table */
	lookup = &ht->t.tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			if (!is_dummy(next))
				(*removed)++;
			else
				nr_dummy++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			nr_dummy++;
		node = clear_flag(next);
	} while (!is_end(node));
	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
	*approx_after = 0;
	if (nr_cpus_mask >= 0) {
		int i;

		for (i = 0; i < nr_cpus_mask + 1; i++) {
			*approx_after += uatomic_read(&ht->percpu_count[i].add);
			*approx_after -= uatomic_read(&ht->percpu_count[i].del);
		}
	}
}

/* called with resize mutex held */
static
void _do_cds_lfht_grow(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	old_order = get_count_order_ulong(old_size);
	new_order = get_count_order_ulong(new_size);
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size > old_size);
	init_table(ht, old_order + 1, new_order);
}

/* called with resize mutex held */
static
void _do_cds_lfht_shrink(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	new_size = max(new_size, MIN_TABLE_SIZE);
	old_order = get_count_order_ulong(old_size);
	new_order = get_count_order_ulong(new_size);
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size < old_size);

	/* Remove and unlink all dummy nodes to remove. */
	fini_table(ht, new_order + 1, old_order);
}


/* called with resize mutex held */
static
void _do_cds_lfht_resize(struct cds_lfht *ht)
{
	unsigned long new_size, old_size;

	/*
	 * Resize table, re-do if the target size has changed under us.
	 */
	do {
		assert(uatomic_read(&ht->in_progress_resize));
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
		ht->t.resize_initiated = 1;
		old_size = ht->t.size;
		new_size = CMM_LOAD_SHARED(ht->t.resize_target);
		if (old_size < new_size)
			_do_cds_lfht_grow(ht, old_size, new_size);
		else if (old_size > new_size)
			_do_cds_lfht_shrink(ht, old_size, new_size);
		ht->t.resize_initiated = 0;
		/* write resize_initiated before read resize_target */
		cmm_smp_mb();
	} while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
}

static
unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
		int growth_order)
{
	return _uatomic_max(&ht->t.resize_target,
			    size << growth_order);
}

static
void resize_target_update_count(struct cds_lfht *ht,
				unsigned long count)
{
	count = max(count, MIN_TABLE_SIZE);
	uatomic_set(&ht->t.resize_target, count);
}

void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
	resize_target_update_count(ht, new_size);
	CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct cds_lfht *ht = work->ht;

	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
	poison_free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}

static
void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(ht, size, growth);
	/* Store resize_target before read resize_initiated */
	cmm_smp_mb();
	if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before load destroy */
		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
			uatomic_dec(&ht->in_progress_resize);
			return;
		}
		work = malloc(sizeof(*work));
		assert(work);	/* check allocation, as done for calloc above */
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	}
}

#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count)
{
	struct rcu_resize_work *work;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	resize_target_update_count(ht, count);
	/* Store resize_target before read resize_initiated */
	cmm_smp_mb();
	if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before load destroy */
		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
			uatomic_dec(&ht->in_progress_resize);
			return;
		}
		work = malloc(sizeof(*work));
		assert(work);	/* check allocation, as done for calloc above */
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	}
}

#endif