/*
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical section allows readers to perform hash
 *   table lookups and use the returned objects safely by delaying
 *   memory reclaim by a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures that no
 *   duplicate key exists.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of dummy nodes is kept. These dummy nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation for small tables only allows expanding the hash table.
 *   It is triggered automatically by detecting long chains in the add
 *   operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - Resize operation initiated by long chain detection is executed by a
 *   call_rcu thread, which keeps the lock-freedom of add and remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. Only the thread whose removal
 *   successfully set the "removed" flag (with a cmpxchg) into a node's
 *   next pointer is considered to have succeeded its removal (and thus
 *   owns the node to reclaim). Because we garbage-collect starting from
 *   an invariant node (the start-of-bucket dummy node) up to the
 *   "removed" node (or find a reverse-hash that is higher), we are sure
 *   that a successful traversal of the chain leads to a chain that is
 *   present in the linked-list (the start node is never removed) and
 *   that it does not contain the "removed" node anymore, even if
 *   concurrent delete/add operations are changing the structure of the
 *   list concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with removed flag set in the bucket where it wants
 *   to add its new node. This ensures lock-freedom of the add operation by
 *   helping the remover unlink nodes from the list rather than waiting
 *   for it to do so.
 * - A RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "dummy node" tables.
 * - There is one dummy node table per hash index order. The size of
 *   each dummy node table is half the number of hashes contained in
 *   this order (except for order 0).
 * - synchronize_rcu is used to garbage-collect the old dummy node table.
 * - The per-order dummy node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
 *
 * Dummy node tables:
 *
 * hash table	hash table	the last	all dummy node tables
 * order	size		dummy node	0   1   2   3   4   5   6(index)
 *				table size
 * 5		32		16		1   1   2   4   8  16
 * 6		64		32		1   1   2   4   8  16  32
 *
 * When growing/shrinking, we only focus on the last dummy node table
 * whose size is (!order ? 1 : (1 << (order - 1))).
 *
 * Example for growing/shrinking:
 * grow hash table from order 5 to 6: init the index=6 dummy node table
 * shrink hash table from order 6 to 5: fini the index=6 dummy node table
 *
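 * Illustration (not part of the original description): the size of the
 * last dummy node table for a given order, together with the total table
 * size, follows the same (!order ? 1 : (1 << (order - 1))) rule as above.
 * A standalone sketch, using local unsigned long variables of this
 * example only:
 *
 *	for (order = 0; order <= 6; order++) {
 *		table_size = 1UL << order;
 *		last_tbl_len = !order ? 1 : 1UL << (order - 1);
 *		printf("order %lu size %lu last dummy table %lu\n",
 *			order, table_size, last_tbl_len);
 *	}
 *
 * For order 6 this prints "order 6 size 64 last dummy table 32",
 * matching the row in the table above.
 *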
 * A bit of ascii art explanation:
 *
 * Order index is off-by-one compared to the actual power of 2 because
 * we use index 0 to deal with the 0 special-case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    (diagram of nodes sorted by reversed-bits value elided)
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order:
 *
 *    (diagram of per-order linkage in reversed-bit order elided)
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <poll.h>
#include <sched.h>
#include <unistd.h>

#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>

#ifdef DEBUG
#define dbg_printf(fmt, args...)	printf("[debug rculfhash] " fmt, ## args)
#else
#define dbg_printf(fmt, args...)
#endif
/*
 * Split-counters lazily update the global counter every 1024
 * additions/removals. They automatically keep track of when a resize is
 * required. We use the bucket length as an indicator of the need to
 * expand for small tables and machines lacking per-cpu data support.
 */
#define COUNT_COMMIT_ORDER		10
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3
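/*
 * Illustration (not part of the implementation): with COUNT_COMMIT_ORDER
 * set to 10, a split counter only propagates to the global counter when
 * the low 10 bits of its new value wrap to zero, i.e. once every 1024
 * local increments:
 *
 *	split_count = uatomic_add_return(&counter->add, 1);
 *	if (!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
 *		uatomic_add(&global_count, 1UL << COUNT_COMMIT_ORDER);
 *
 * This is the pattern used by ht_count_add()/ht_count_del() below; the
 * "counter" and "global_count" names here are for illustration only.
 */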
/*
 * Define the minimum table size.
 */
#define MIN_TABLE_SIZE			1

#if (CAA_BITS_PER_LONG == 32)
#define MAX_TABLE_ORDER			32
#else
#define MAX_TABLE_ORDER			64
#endif

/*
 * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
 */
#define MIN_PARTITION_PER_THREAD_ORDER	12
#define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)

#ifndef min
#define min(a, b)	((a) < (b) ? (a) : (b))
#endif

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif
/*
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no node must attach to the node scheduled for
 * removal, and that node garbage collection must be performed.
 * The dummy flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define DUMMY_FLAG		(1UL << 1)
#define FLAGS_MASK		((1UL << 2) - 1)
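/*
 * Illustration (not part of the implementation): because nodes are at
 * least 4-byte aligned, the two low bits of a next pointer are free to
 * carry the flags above.  Setting and clearing them is plain integer
 * arithmetic on the pointer value:
 *
 *	flagged = (struct cds_lfht_node *)((unsigned long) next | REMOVED_FLAG);
 *	plain   = (struct cds_lfht_node *)((unsigned long) flagged & ~FLAGS_MASK);
 *
 * The helpers flag_removed(), flag_dummy(), clear_flag(), is_removed()
 * and is_dummy() below encapsulate exactly this arithmetic.
 */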
/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE	NULL

struct ht_items_count {
	unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct rcu_level {
	/* Note: manually update allocation length when adding a field */
	struct _cds_lfht_node nodes[0];
};

struct rcu_table {
	unsigned long size;	/* always a power of 2, shared (RCU) */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_level *tbl[MAX_TABLE_ORDER];
};
struct cds_lfht {
	struct rcu_table t;
	cds_lfht_hash_fct hash_fct;
	cds_lfht_compare_fct compare_fct;
	unsigned long min_alloc_order;
	unsigned long min_alloc_size;
	unsigned long hash_seed;
	int flags;
	/*
	 * We need to put the work threads offline (QSBR) when taking this
	 * mutex, because we use synchronize_rcu within this mutex critical
	 * section, which waits on read-side critical sections, and could
	 * therefore cause grace-period deadlock if we hold off RCU G.P.
	 * completion.
	 */
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize, in_progress_destroy;
	void (*cds_lfht_call_rcu)(struct rcu_head *head,
			void (*func)(struct rcu_head *head));
	void (*cds_lfht_synchronize_rcu)(void);
	void (*cds_lfht_rcu_read_lock)(void);
	void (*cds_lfht_rcu_read_unlock)(void);
	void (*cds_lfht_rcu_thread_offline)(void);
	void (*cds_lfht_rcu_thread_online)(void);
	void (*cds_lfht_rcu_register_thread)(void);
	void (*cds_lfht_rcu_unregister_thread)(void);
	pthread_attr_t *resize_attr;	/* Resize threads attributes */
	long count;			/* global approximate item count */
	struct ht_items_count *split_count;	/* split item count */
};
struct rcu_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
};

struct partition_resize_work {
	pthread_t thread_id;
	struct cds_lfht *ht;
	unsigned long i, start, len;
	void (*fct)(struct cds_lfht *ht, unsigned long i,
		    unsigned long start, unsigned long len);
};
static
void _cds_lfht_add(struct cds_lfht *ht,
		unsigned long size,
		struct cds_lfht_node *node,
		struct cds_lfht_iter *unique_ret,
		int dummy);
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif
#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;
	if (!(x & 0xFFFFFFFF00000000ULL)) { x <<= 32; r -= 32; }
	if (!(x & 0xFFFF000000000000ULL)) { x <<= 16; r -= 16; }
	if (!(x & 0xFF00000000000000ULL)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xF000000000000000ULL)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xC000000000000000ULL)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x8000000000000000ULL)) { x <<= 1;  r -= 1;  }
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) { x <<= 16; r -= 16; }
	if (!(x & 0xFF000000U)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xF0000000U)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xC0000000U)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000U)) { x <<= 1;  r -= 1;  }
	return r;
}
#endif

static
unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}
/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
static
int get_count_order_u32(uint32_t x)
{
	if (!x)
		return -1;

	return fls_u32(x - 1);
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
static
int get_count_order_ulong(unsigned long x)
{
	if (!x)
		return -1;

	return fls_ulong(x - 1);
}
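/*
 * Examples (illustration only):
 *
 *	assert(get_count_order_ulong(1) == 0);
 *	assert(get_count_order_ulong(8) == 3);
 *	assert(get_count_order_ulong(9) == 4);
 *
 * i.e. the returned order is the exponent of the smallest power of two
 * greater than or equal to x.
 */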
#ifdef POISON_FREE
#define poison_free(ptr)				\
	do {						\
		memset(ptr, 0x42, sizeof(*(ptr)));	\
		free(ptr);				\
	} while (0)
#else
#define poison_free(ptr)	free(ptr)
#endif

static
void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);
/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we support hash table item accounting.
 * In the unfortunate event the number of CPUs reported would be
 * inaccurate, we use modulo arithmetic on the number of CPUs we got.
 */
#if defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count);

static long nr_cpus_mask = -1;
static long split_count_mask = -1;
static void ht_init_nr_cpus_mask(void)
{
	long maxcpus;

	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		nr_cpus_mask = -2;
		return;
	}
	/*
	 * round up number of CPUs to next power of two, so we
	 * can use & for modulo.
	 */
	maxcpus = 1UL << get_count_order_ulong(maxcpus);
	nr_cpus_mask = maxcpus - 1;
}
static
struct ht_items_count *alloc_split_items_count(void)
{
	struct ht_items_count *count;

	if (nr_cpus_mask == -1) {
		ht_init_nr_cpus_mask();
		split_count_mask = nr_cpus_mask;
	}

	if (split_count_mask < 0)
		return NULL;
	return calloc(split_count_mask + 1, sizeof(*count));
}

static
void free_split_items_count(struct ht_items_count *count)
{
	poison_free(count);
}
#if defined(HAVE_SCHED_GETCPU)
static
int ht_get_split_count_index(unsigned long hash)
{
	int cpu;

	assert(split_count_mask >= 0);
	cpu = sched_getcpu();
	if (unlikely(cpu < 0))
		return hash & split_count_mask;
	else
		return cpu & split_count_mask;
}
#else /* #if defined(HAVE_SCHED_GETCPU) */
static
int ht_get_split_count_index(unsigned long hash)
{
	return hash & split_count_mask;
}
#endif /* #else #if defined(HAVE_SCHED_GETCPU) */
static
void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
	unsigned long split_count;
	int index;

	if (unlikely(!ht->split_count))
		return;
	index = ht_get_split_count_index(hash);
	split_count = uatomic_add_return(&ht->split_count[index].add, 1);
	if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
		long count;

		dbg_printf("add split count %lu\n", split_count);
		count = uatomic_add_return(&ht->count,
				1UL << COUNT_COMMIT_ORDER);
		/* If global count is a power of 2 */
		if (!(count & (count - 1))) {
			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
				return;
			dbg_printf("add set global %ld\n", count);
			cds_lfht_resize_lazy_count(ht, size,
				count >> (CHAIN_LEN_TARGET - 1));
		}
	}
}
static
void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
	unsigned long split_count;
	int index;

	if (unlikely(!ht->split_count))
		return;
	index = ht_get_split_count_index(hash);
	split_count = uatomic_add_return(&ht->split_count[index].del, 1);
	if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
		long count;

		dbg_printf("del split count %lu\n", split_count);
		count = uatomic_add_return(&ht->count,
				-(1UL << COUNT_COMMIT_ORDER));
		/* If global count is a power of 2 */
		if (!(count & (count - 1))) {
			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
				return;
			dbg_printf("del set global %ld\n", count);
			/*
			 * Don't shrink table if the number of nodes is below a
			 * certain threshold.
			 */
			if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
				return;
			cds_lfht_resize_lazy_count(ht, size,
				count >> (CHAIN_LEN_TARGET - 1));
		}
	}
}
#else /* #if defined(HAVE_SYSCONF) */

static const long nr_cpus_mask = -2;
static const long split_count_mask = -2;

static
struct ht_items_count *alloc_split_items_count(void)
{
	return NULL;
}

static
void free_split_items_count(struct ht_items_count *count)
{
}

static
void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
}

static
void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) */
static
void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
{
	unsigned long count;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = uatomic_read(&ht->count);
	/*
	 * Use bucket-local length for small table expand and for
	 * environments lacking per-cpu data support.
	 */
	if (count >= (1UL << COUNT_COMMIT_ORDER))
		return;
	if (chain_len > 100)
		dbg_printf("WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		cds_lfht_resize_lazy(ht, size,
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}
static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_dummy(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}

static
struct cds_lfht_node *get_end(void)
{
	return (struct cds_lfht_node *) END_VALUE;
}

static
int is_end(struct cds_lfht_node *node)
{
	return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
}
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}
static
struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
		unsigned long hash)
{
	unsigned long index, order;

	index = hash & (size - 1);

	if (index < ht->min_alloc_size) {
		dbg_printf("lookup hash %lu index %lu order 0 aridx 0\n",
			   hash, index);
		return &ht->t.tbl[0]->nodes[index];
	}
	/*
	 * equivalent to get_count_order_ulong(index + 1), but optimizes
	 * away the non-existing 0 special-case for
	 * get_count_order_ulong.
	 */
	order = fls_ulong(index);
	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
		   hash, index, order, index & ((1UL << (order - 1)) - 1));
	return &ht->t.tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
}
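/*
 * Worked example (illustration only), assuming min_alloc_size == 1:
 * with size == 32 and hash == 45, index = 45 & 31 = 13, order =
 * fls_ulong(13) = 4, so the bucket lives in the order-4 dummy node
 * table at offset 13 & ((1UL << 3) - 1) = 5.  In short:
 *
 *	index = hash & (size - 1);
 *	order = fls_ulong(index);
 *	aridx = index & ((1UL << (order - 1)) - 1);
 *
 * which mirrors the arithmetic performed by lookup_bucket() above.
 */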
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;

	assert(!is_dummy(dummy));
	assert(!is_removed(dummy));
	assert(!is_dummy(node));
	assert(!is_removed(node));
	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(!is_removed(iter));
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		/*
		 * We should never be called with dummy (start of chain)
		 * and logically removed node (end of path compression
		 * marker) being the actual same node. This would be a
		 * bug in the algorithm implementation.
		 */
		assert(dummy != node);
		for (;;) {
			if (unlikely(is_end(iter)))
				return;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (likely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
}
static
int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *old_node,
		struct cds_lfht_node *old_next,
		struct cds_lfht_node *new_node)
{
	struct cds_lfht_node *dummy, *ret_next;
	struct _cds_lfht_node *lookup;

	if (!old_node)	/* Return -ENOENT if asked to replace NULL node */
		return -ENOENT;

	assert(!is_removed(old_node));
	assert(!is_dummy(old_node));
	assert(!is_removed(new_node));
	assert(!is_dummy(new_node));
	assert(new_node != old_node);
	for (;;) {
		/* Insert after node to be replaced */
		if (is_removed(old_next)) {
			/*
			 * Too late, the old node has been removed under us
			 * between lookup and replace. Fail.
			 */
			return -ENOENT;
		}
		assert(!is_dummy(old_next));
		assert(new_node != clear_flag(old_next));
		new_node->p.next = clear_flag(old_next);
		/*
		 * Here is the whole trick for lock-free replace: we add
		 * the replacement node _after_ the node we want to
		 * replace by atomically setting its next pointer at the
		 * same time we set its removal flag. Given that
		 * the lookups/get next use an iterator aware of the
		 * next pointer, they will either skip the old node due
		 * to the removal flag and see the new node, or use
		 * the old node, but will not see the new one.
		 */
		ret_next = uatomic_cmpxchg(&old_node->p.next,
			      old_next, flag_removed(new_node));
		if (ret_next == old_next)
			break;		/* We performed the replacement. */
		old_next = ret_next;
	}

	/*
	 * Ensure that the old node is not visible to readers anymore:
	 * lookup for the node, and remove it (along with any other
	 * logically removed node) if found.
	 */
	lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
	dummy = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy, new_node);

	assert(is_removed(rcu_dereference(old_node->p.next)));
	return 0;
}
/*
 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
 * mode. A NULL unique_ret allows creation of duplicate keys.
 */
static
void _cds_lfht_add(struct cds_lfht *ht,
		unsigned long size,
		struct cds_lfht_node *node,
		struct cds_lfht_iter *unique_ret,
		int dummy)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*return_node;
	struct _cds_lfht_node *lookup;

	assert(!is_dummy(node));
	assert(!is_removed(node));
	lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = (struct cds_lfht_node *) lookup;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(is_end(iter)))
				goto insert;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				goto insert;

			/* dummy node is the first node of the identical-hash-value chain */
			if (dummy && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash)
				goto insert;

			next = rcu_dereference(clear_flag(iter)->p.next);
			if (unlikely(is_removed(next)))
				goto gc_node;

			/* uniquely add */
			if (unique_ret
			    && !is_dummy(next)
			    && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash) {
				struct cds_lfht_iter d_iter = { .node = node, .next = iter, };

				/*
				 * uniquely adding inserts the node as the first
				 * node of the identical-hash-value node chain.
				 *
				 * This semantic ensures no duplicated keys
				 * should ever be observable in the table
				 * (including observing one node after another
				 * by forward iterations)
				 */
				cds_lfht_next_duplicate(ht, &d_iter);
				if (!d_iter.node)
					goto insert;

				*unique_ret = d_iter;
				return;
			}

			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
			    && !is_dummy(next))
				check_resize(ht, size, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}

	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removed(iter));
		assert(iter_prev != node);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter) {
			continue;	/* retry */
		} else {
			return_node = node;
			goto end;
		}

	gc_node:
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
end:
	if (unique_ret) {
		unique_ret->node = return_node;
		/* unique_ret->next left unset, never used. */
	}
}
static
int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *node,
		int dummy_removal)
{
	struct cds_lfht_node *dummy, *next, *old;
	struct _cds_lfht_node *lookup;

	if (!node)	/* Return -ENOENT if asked to delete NULL node */
		return -ENOENT;

	/* logically delete the node */
	assert(!is_dummy(node));
	assert(!is_removed(node));
	old = rcu_dereference(node->p.next);
	do {
		struct cds_lfht_node *new_next;

		next = old;
		if (unlikely(is_removed(next)))
			return -ENOENT;
		if (dummy_removal)
			assert(is_dummy(next));
		else
			assert(!is_dummy(next));
		new_next = flag_removed(next);
		old = uatomic_cmpxchg(&node->p.next, next, new_next);
	} while (old != next);
	/* We performed the (logical) deletion. */

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
	dummy = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy, node);

	assert(is_removed(rcu_dereference(node->p.next)));
	return 0;
}
static
void *partition_resize_thread(void *arg)
{
	struct partition_resize_work *work = arg;

	work->ht->cds_lfht_rcu_register_thread();
	work->fct(work->ht, work->i, work->start, work->len);
	work->ht->cds_lfht_rcu_unregister_thread();
	return NULL;
}
static
void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
		unsigned long len,
		void (*fct)(struct cds_lfht *ht, unsigned long i,
			unsigned long start, unsigned long len))
{
	unsigned long partition_len;
	struct partition_resize_work *work;
	int thread, ret;
	unsigned long nr_threads;

	/*
	 * Note: nr_cpus_mask + 1 is always power of 2.
	 * We spawn just the number of threads we need to satisfy the minimum
	 * partition size, up to the number of CPUs in the system.
	 */
	if (nr_cpus_mask > 0) {
		nr_threads = min(nr_cpus_mask + 1,
				 len >> MIN_PARTITION_PER_THREAD_ORDER);
	} else {
		nr_threads = 1;
	}
	partition_len = len >> get_count_order_ulong(nr_threads);
	work = calloc(nr_threads, sizeof(*work));
	assert(work);
	for (thread = 0; thread < nr_threads; thread++) {
		work[thread].ht = ht;
		work[thread].i = i;
		work[thread].len = partition_len;
		work[thread].start = thread * partition_len;
		work[thread].fct = fct;
		ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
			partition_resize_thread, &work[thread]);
		assert(!ret);
	}
	for (thread = 0; thread < nr_threads; thread++) {
		ret = pthread_join(work[thread].thread_id, NULL);
		assert(!ret);
	}
	free(work);
}
/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 *
 * When we reach a certain length, we can split this population phase over
 * many worker threads, based on the number of CPUs available in the system.
 * This should therefore take care of not having the expand lagging behind too
 * many concurrent insertion threads by using the scheduler's ability to
 * schedule dummy node population fairly with insertions.
 */
static
void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
				   unsigned long start, unsigned long len)
{
	unsigned long j;

	assert(i > ht->min_alloc_order);
	ht->cds_lfht_rcu_read_lock();
	for (j = start; j < start + len; j++) {
		struct cds_lfht_node *new_node =
			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];

		dbg_printf("init populate: i %lu j %lu hash %lu\n",
			   i, j, (1UL << (i - 1)) + j);
		new_node->p.reverse_hash =
			bit_reverse_ulong((1UL << (i - 1)) + j);
		_cds_lfht_add(ht, 1UL << (i - 1),
				new_node, NULL, 1);
	}
	ht->cds_lfht_rcu_read_unlock();
}
static
void init_table_populate(struct cds_lfht *ht, unsigned long i,
			 unsigned long len)
{
	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->cds_lfht_rcu_thread_online();
		init_table_populate_partition(ht, i, 0, len);
		ht->cds_lfht_rcu_thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, init_table_populate_partition);
}
static
void init_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long last_order)
{
	unsigned long i;

	dbg_printf("init table: first_order %lu last_order %lu\n",
		   first_order, last_order);
	assert(first_order > ht->min_alloc_order);
	for (i = first_order; i <= last_order; i++) {
		unsigned long len;

		len = 1UL << (i - 1);
		dbg_printf("init order %lu len: %lu\n", i, len);

		/* Stop expand if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i))
			break;

		ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
		assert(ht->t.tbl[i]);

		/*
		 * Set all dummy nodes reverse hash values for a level and
		 * link all dummy nodes into the table.
		 */
		init_table_populate(ht, i, len);

		/*
		 * Update table size.
		 */
		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->t.size, 1UL << i);

		dbg_printf("init new size: %lu\n", 1UL << i);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
}
/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 * For a single level, we logically remove and garbage collect each node.
 *
 * As a design choice, we perform logical removal and garbage collection on a
 * node-per-node basis to simplify this algorithm. We also assume keeping good
 * cache locality of the operation would outweigh the possible performance gain
 * that could be achieved by batching garbage collection for multiple levels.
 * However, this would have to be justified by benchmarks.
 *
 * Concurrent removal and add operations are helping us perform garbage
 * collection of logically removed nodes. We guarantee that all logically
 * removed nodes have been garbage-collected (unlinked) before call_rcu is
 * invoked to free a whole level of dummy nodes (after a grace period).
 *
 * Logical removal and garbage collection can therefore be done in batch or on a
 * node-per-node basis, as long as the guarantee above holds.
 *
 * When we reach a certain length, we can split this removal over many worker
 * threads, based on the number of CPUs available in the system. This should
 * take care of not letting the resize process lag behind too many concurrent
 * updater threads actively inserting into the hash table.
 */
static
void remove_table_partition(struct cds_lfht *ht, unsigned long i,
			    unsigned long start, unsigned long len)
{
	unsigned long j;

	assert(i > ht->min_alloc_order);
	ht->cds_lfht_rcu_read_lock();
	for (j = start; j < start + len; j++) {
		struct cds_lfht_node *fini_node =
			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];

		dbg_printf("remove entry: i %lu j %lu hash %lu\n",
			   i, j, (1UL << (i - 1)) + j);
		fini_node->p.reverse_hash =
			bit_reverse_ulong((1UL << (i - 1)) + j);
		(void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
	}
	ht->cds_lfht_rcu_read_unlock();
}
static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
{
	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->cds_lfht_rcu_thread_online();
		remove_table_partition(ht, i, 0, len);
		ht->cds_lfht_rcu_thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, remove_table_partition);
}
static
void fini_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long last_order)
{
	unsigned long i;
	void *free_by_rcu = NULL;

	dbg_printf("fini table: first_order %lu last_order %lu\n",
		   first_order, last_order);
	assert(first_order > ht->min_alloc_order);
	for (i = last_order; i >= first_order; i--) {
		unsigned long len;

		len = 1UL << (i - 1);
		dbg_printf("fini order %lu len: %lu\n", i, len);

		/* Stop shrink if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
			break;

		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));

		/*
		 * We need to wait for all add operations to reach Q.S. (and
		 * thus use the new table for lookups) before we can start
		 * releasing the old dummy nodes. Otherwise their lookup will
		 * return a logically removed node as insert position.
		 */
		ht->cds_lfht_synchronize_rcu();
		if (free_by_rcu)
			free(free_by_rcu);

		/*
		 * Set "removed" flag in dummy nodes about to be removed.
		 * Unlink all now-logically-removed dummy node pointers.
		 * Concurrent add/remove operations are helping us do
		 * the gc.
		 */
		remove_table(ht, i, len);

		free_by_rcu = ht->t.tbl[i];

		dbg_printf("fini new size: %lu\n", 1UL << i);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}

	if (free_by_rcu) {
		ht->cds_lfht_synchronize_rcu();
		free(free_by_rcu);
	}
}
static
void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
{
	struct _cds_lfht_node *prev, *node;
	unsigned long order, len, i, j;

	ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct _cds_lfht_node));
	assert(ht->t.tbl[0]);

	dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0UL, 0UL, 0UL);
	ht->t.tbl[0]->nodes[0].next = flag_dummy(get_end());
	ht->t.tbl[0]->nodes[0].reverse_hash = 0;

	for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
		len = 1UL << (order - 1);
		if (order <= ht->min_alloc_order) {
			ht->t.tbl[order] = (struct rcu_level *) (ht->t.tbl[0]->nodes + len);
		} else {
			ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
			assert(ht->t.tbl[order]);
		}

		i = 0;
		prev = ht->t.tbl[i]->nodes;
		for (j = 0; j < len; j++) {
			if (j & (j - 1)) {	/* Between power of 2 */
				prev++;
			} else if (j) {		/* At each power of 2 */
				i++;
				prev = ht->t.tbl[i]->nodes;
			}

			node = &ht->t.tbl[order]->nodes[j];
			dbg_printf("create dummy: order %lu index %lu hash %lu\n",
				   order, j, j + len);
			node->next = prev->next;
			assert(is_dummy(node->next));
			node->reverse_hash = bit_reverse_ulong(j + len);
			prev->next = flag_dummy((struct cds_lfht_node *) node);
		}
	}
}
struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
			cds_lfht_compare_fct compare_fct,
			unsigned long hash_seed,
			unsigned long init_size,
			unsigned long min_alloc_size,
			int flags,
			void (*cds_lfht_call_rcu)(struct rcu_head *head,
					void (*func)(struct rcu_head *head)),
			void (*cds_lfht_synchronize_rcu)(void),
			void (*cds_lfht_rcu_read_lock)(void),
			void (*cds_lfht_rcu_read_unlock)(void),
			void (*cds_lfht_rcu_thread_offline)(void),
			void (*cds_lfht_rcu_thread_online)(void),
			void (*cds_lfht_rcu_register_thread)(void),
			void (*cds_lfht_rcu_unregister_thread)(void),
			pthread_attr_t *attr)
{
	struct cds_lfht *ht;
	unsigned long order;

	/* min_alloc_size must be power of two */
	if (!min_alloc_size || (min_alloc_size & (min_alloc_size - 1)))
		return NULL;
	/* init_size must be power of two */
	if (!init_size || (init_size & (init_size - 1)))
		return NULL;
	min_alloc_size = max(min_alloc_size, MIN_TABLE_SIZE);
	init_size = max(init_size, min_alloc_size);
	ht = calloc(1, sizeof(struct cds_lfht));
	assert(ht);
	ht->flags = flags;
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
	ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
	ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
	ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
	ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
	ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
	ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
	ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
	ht->resize_attr = attr;
	ht->split_count = alloc_split_items_count();
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = get_count_order_ulong(init_size);
	ht->t.resize_target = 1UL << order;
	cds_lfht_create_dummy(ht, 1UL << order);
	ht->t.size = 1UL << order;
	ht->min_alloc_size = min_alloc_size;
	ht->min_alloc_order = get_count_order_ulong(min_alloc_size);
	return ht;
}
void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
		struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next, *dummy_node;
	struct _cds_lfht_node *lookup;
	unsigned long hash, reverse_hash, size;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	size = rcu_dereference(ht->t.size);
	lookup = lookup_bucket(ht, size, hash);
	dummy_node = (struct cds_lfht_node *) lookup;
	/* We can always skip the dummy node initially */
	node = rcu_dereference(dummy_node->p.next);
	node = clear_flag(node);
	for (;;) {
		if (unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && clear_flag(node)->p.reverse_hash == reverse_hash
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
				break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	iter->node = node;
	iter->next = next;
}
void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next;
	unsigned long reverse_hash;
	void *key;
	size_t key_len;

	node = iter->node;
	reverse_hash = node->p.reverse_hash;
	key = node->key;
	key_len = node->key_len;
	next = iter->next;
	node = clear_flag(next);

	for (;;) {
		if (unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
				break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	iter->node = node;
	iter->next = next;
}
void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next;

	node = clear_flag(iter->next);
	for (;;) {
		if (unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)) {
				break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	iter->node = node;
	iter->next = next;
}
void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	struct _cds_lfht_node *lookup;

	/*
	 * Get next after first dummy node. The first dummy node is the
	 * first node of the linked list.
	 */
	lookup = &ht->t.tbl[0]->nodes[0];
	iter->next = lookup->next;
	cds_lfht_next(ht, iter);
}
void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	unsigned long hash, size;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	size = rcu_dereference(ht->t.size);
	_cds_lfht_add(ht, size, node, NULL, 0);
	ht_count_add(ht, size, hash);
}
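/*
 * Usage sketch (illustration only): add must be called from within an
 * RCU read-side critical section, with the node's key fields already
 * initialized (e.g. via cds_lfht_node_init()):
 *
 *	cds_lfht_node_init(&obj->node, obj->key, obj->key_len);
 *	rcu_read_lock();
 *	cds_lfht_add(ht, &obj->node);
 *	rcu_read_unlock();
 *
 * The "obj" structure embedding a struct cds_lfht_node is an assumption
 * of this example, not something defined by this file.
 */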
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	unsigned long hash, size;
	struct cds_lfht_iter iter;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	size = rcu_dereference(ht->t.size);
	_cds_lfht_add(ht, size, node, &iter, 0);
	if (iter.node == node)
		ht_count_add(ht, size, hash);
	return iter.node;
}
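/*
 * Usage sketch (illustration only): cds_lfht_add_unique() returns the
 * node actually present in the table, so comparing the return value with
 * the node passed in tells whether the insertion succeeded:
 *
 *	rcu_read_lock();
 *	ret_node = cds_lfht_add_unique(ht, &obj->node);
 *	rcu_read_unlock();
 *	if (ret_node != &obj->node)
 *		free(obj);
 *
 * where ret_node != &obj->node means another node with the same key was
 * already present and "obj" was not inserted.  The "obj" structure is,
 * as above, only an assumption of this example.
 */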
struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	unsigned long hash, size;
	struct cds_lfht_iter iter;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	size = rcu_dereference(ht->t.size);
	for (;;) {
		_cds_lfht_add(ht, size, node, &iter, 0);
		if (iter.node == node) {
			ht_count_add(ht, size, hash);
			return NULL;
		}

		if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node))
			return iter.node;
	}
}
int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
		struct cds_lfht_node *new_node)
{
	unsigned long size;

	size = rcu_dereference(ht->t.size);
	return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
			new_node);
}
int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	unsigned long size, hash;
	int ret;

	size = rcu_dereference(ht->t.size);
	ret = _cds_lfht_del(ht, size, iter->node, 0);
	if (!ret) {
		hash = bit_reverse_ulong(iter->node->p.reverse_hash);
		ht_count_del(ht, size, hash);
	}
	return ret;
}
static
int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
	struct cds_lfht_node *node;
	struct _cds_lfht_node *lookup;
	unsigned long order, i, size;

	/* Check that the table is empty */
	lookup = &ht->t.tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (!is_end(node));
	/*
	 * size accessed without rcu_dereference because hash table is
	 * being destroyed.
	 */
	size = ht->t.size;
	/* Internal sanity check: all nodes left should be dummy */
	for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
		unsigned long len;

		len = !order ? 1 : 1UL << (order - 1);
		for (i = 0; i < len; i++) {
			dbg_printf("delete order %lu i %lu hash %lu\n",
				order, i,
				bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
			assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
		}

		if (order == ht->min_alloc_order)
			poison_free(ht->t.tbl[0]);
		else if (order > ht->min_alloc_order)
			poison_free(ht->t.tbl[order]);
		/* Nothing to delete for order < ht->min_alloc_order */
	}
	return 0;
}
/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	cmm_smp_mb();	/* Store destroy before load resize */
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = cds_lfht_delete_dummy(ht);
	if (ret)
		return ret;
	free_split_items_count(ht->split_count);
	if (attr)
		*attr = ht->resize_attr;
	poison_free(ht);
	return ret;
}
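/*
 * Usage sketch (illustration only): destruction must happen after all
 * readers and updaters are done with the table, and after all user nodes
 * have been removed and freed:
 *
 *	pthread_attr_t *attr;
 *
 *	ret = cds_lfht_destroy(ht, &attr);
 *	assert(!ret);
 *
 * The returned attr pointer is the one that was handed to
 * _cds_lfht_new(), so the caller can dispose of it if needed.
 */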
void cds_lfht_count_nodes(struct cds_lfht *ht,
		long *approx_before,
		unsigned long *count,
		unsigned long *removed,
		long *approx_after)
{
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long nr_dummy = 0;

	*approx_before = 0;
	if (split_count_mask >= 0) {
		int i;

		for (i = 0; i < split_count_mask + 1; i++) {
			*approx_before += uatomic_read(&ht->split_count[i].add);
			*approx_before -= uatomic_read(&ht->split_count[i].del);
		}
	}

	*count = 0;
	*removed = 0;

	/* Count non-dummy nodes in the table */
	lookup = &ht->t.tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			if (!is_dummy(next))
				(*removed)++;
			else
				nr_dummy++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			nr_dummy++;
		node = clear_flag(next);
	} while (!is_end(node));
	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);

	*approx_after = 0;
	if (split_count_mask >= 0) {
		int i;

		for (i = 0; i < split_count_mask + 1; i++) {
			*approx_after += uatomic_read(&ht->split_count[i].add);
			*approx_after -= uatomic_read(&ht->split_count[i].del);
		}
	}
}
/* called with resize mutex held */
static
void _do_cds_lfht_grow(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	old_order = get_count_order_ulong(old_size);
	new_order = get_count_order_ulong(new_size);
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size > old_size);
	init_table(ht, old_order + 1, new_order);
}

/* called with resize mutex held */
static
void _do_cds_lfht_shrink(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	new_size = max(new_size, ht->min_alloc_size);
	old_order = get_count_order_ulong(old_size);
	new_order = get_count_order_ulong(new_size);
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size < old_size);

	/* Remove and unlink all dummy nodes to remove. */
	fini_table(ht, new_order + 1, old_order);
}
/* called with resize mutex held */
static
void _do_cds_lfht_resize(struct cds_lfht *ht)
{
	unsigned long new_size, old_size;

	/*
	 * Resize table, re-do if the target size has changed under us.
	 */
	do {
		assert(uatomic_read(&ht->in_progress_resize));
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
		ht->t.resize_initiated = 1;
		old_size = ht->t.size;
		new_size = CMM_LOAD_SHARED(ht->t.resize_target);
		if (old_size < new_size)
			_do_cds_lfht_grow(ht, old_size, new_size);
		else if (old_size > new_size)
			_do_cds_lfht_shrink(ht, old_size, new_size);
		ht->t.resize_initiated = 0;
		/* write resize_initiated before read resize_target */
		cmm_smp_mb();
	} while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
}
static
unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
				   int growth_order)
{
	return _uatomic_max(&ht->t.resize_target,
			    size << growth_order);
}

static
void resize_target_update_count(struct cds_lfht *ht,
				unsigned long count)
{
	count = max(count, ht->min_alloc_size);
	uatomic_set(&ht->t.resize_target, count);
}
void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
	resize_target_update_count(ht, new_size);
	CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
}
static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct cds_lfht *ht = work->ht;

	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
	poison_free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}
static
void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(ht, size, growth);
	/* Store resize_target before read resize_initiated */
	cmm_smp_mb();
	if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before load destroy */
		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
			uatomic_dec(&ht->in_progress_resize);
			return;
		}
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	}
}
#if defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count)
{
	struct rcu_resize_work *work;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	resize_target_update_count(ht, count);
	/* Store resize_target before read resize_initiated */
	cmm_smp_mb();
	if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before load destroy */
		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
			uatomic_dec(&ht->in_progress_resize);
			return;
		}
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	}
}

#endif /* #if defined(HAVE_SYSCONF) */