/*
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical section allows readers to perform hash
 *   table lookups and use the returned objects safely by delaying
 *   memory reclaim for a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within an RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures that no
 *   duplicate key exists.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of bucket nodes is kept. These bucket nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation for small tables only allows expanding the hash table.
 *   It is triggered automatically by detecting long chains in the add
 *   operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - Resize operation initiated by long chain detection is executed by a
 *   call_rcu thread, which keeps the add and remove operations lock-free.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with the "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with the "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with the "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. After setting the "removed" flag,
 *   only the first thread to set the "removal owner" flag (with an
 *   xchg) in a node's next pointer is considered to have succeeded its
 *   removal (and thus owns the node to reclaim). Because we
 *   garbage-collect starting from an invariant node (the start-of-bucket
 *   bucket node) up to the "removed" node (or find a reverse-hash that
 *   is higher), we are sure that a successful traversal of the chain
 *   leads to a chain that is present in the linked-list (the start node
 *   is never removed) and that it does not contain the "removed" node
 *   anymore, even if concurrent delete/add operations are changing the
 *   structure of the list concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with the removed flag set in the bucket where it
 *   wants to add its new node. This ensures lock-freedom of the add
 *   operation by helping the remover unlink nodes from the list rather
 *   than waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "bucket node" tables.
 * - There is one bucket node table per hash index order. The size of
 *   each bucket node table is half the number of hashes contained in
 *   this order (except for order 0).
 * - synchronize_rcu is used to garbage-collect the old bucket node table.
 * - The per-order bucket node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
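 *
 * As an illustration only (a usage sketch, not part of the design notes
 * above; "mynode", "key", "compute_hash" and "match" are hypothetical
 * caller-provided names), the public API built on these properties is
 * typically used with every lookup/add/del enclosed in an RCU read-side
 * critical section:
 *
 *	ht = cds_lfht_new(1, 1, 0, CDS_LFHT_AUTO_RESIZE, NULL);
 *
 *	rcu_read_lock();
 *	cds_lfht_add(ht, compute_hash(&mynode->key), &mynode->lfht_node);
 *	rcu_read_unlock();
 *
 *	rcu_read_lock();
 *	cds_lfht_lookup(ht, compute_hash(&key), match, &key, &iter);
 *	found = cds_lfht_iter_get_node(&iter);
 *	if (found)
 *		(void) cds_lfht_del(ht, found);
 *	rcu_read_unlock();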
 *
 * Linearizability Guarantees:
 *
 * To discuss these guarantees, we first define "read" operations as any
 * of the following operations surrounded by an RCU read-side lock/unlock
 * pair:
 *  - cds_lfht_lookup
 *  - cds_lfht_lookup followed by iteration with cds_lfht_next_duplicate
 *  - cds_lfht_first followed by iteration with cds_lfht_next
 *
 * We define "write" operations as any of cds_lfht_add,
 * cds_lfht_add_unique, cds_lfht_add_replace, cds_lfht_del.
 *
 * The following guarantees are offered by this hash table:
 *
 * A) a "read" after a "write" will always return the result of the
 *    latest write.
 * B) a "write" performed after a "read" will never be observed by that
 *    read.
 * C) It is guaranteed that after a grace period following a "del" or
 *    "replace" operation, no reference to the removed items exists in
 *    the hash table.
 * D) Uniqueness guarantee: when using add_unique and/or add_replace to
 *    insert nodes into the table, if there was previously one node or
 *    less with the same key being inserted by one or more concurrent
 *    add_unique and/or add_replace, all concurrent "read" performed on
 *    the hash table are guaranteed to find one, and only one node with
 *    this key.
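 *
 * For instance (illustrative sketch only; "mynode", "key" and "match"
 * are hypothetical caller-provided names), guarantee D lets a caller of
 * add_unique detect whether its own node or a concurrently inserted
 * duplicate won the race:
 *
 *	rcu_read_lock();
 *	ret = cds_lfht_add_unique(ht, hash, match, &key, &mynode->lfht_node);
 *	rcu_read_unlock();
 *	if (ret != &mynode->lfht_node)
 *		... a node with the same key was already present ...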
 *
 * Bucket node tables:
 *
 * hash table	hash table	the last	all bucket node tables
 * order	size		bucket node	0   1   2   3   4   5   6(index)
 *				table size
 * 0		1		1		1
 * 1		2		1		1   1
 * 2		4		2		1   1   2
 * 3		8		4		1   1   2   4
 * 4		16		8		1   1   2   4   8
 * 5		32		16		1   1   2   4   8  16
 * 6		64		32		1   1   2   4   8  16  32
 *
 * When growing/shrinking, we only focus on the last bucket node table,
 * whose size is (!order ? 1 : (1 << (order - 1))).
 *
 * Example for growing/shrinking:
 * grow hash table from order 5 to 6: init the index=6 bucket node table
 * shrink hash table from order 6 to 5: fini the index=6 bucket node table
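 *
 * For instance (worked example), growing to order 6 allocates the
 * index=6 bucket node table of (!6 ? 1 : (1 << (6 - 1))) == 32 entries,
 * which become the buckets for hash indexes 32 to 63.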
 *
 * A bit of ascii art explanation:
 *
 * The order index is off by one compared to the actual power of 2
 * because we use index 0 to deal with the 0 special case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 * 0  000        000
 * 4  100        001
 * 2  010        010
 * 6  110        011
 * 1  001        100
 * 5  101        101
 * 3  011        110
 * 7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order, with each order-table level pointing at the first
 * node of its order (diagram omitted).
 */

#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <poll.h>

#include <urcu-call-rcu.h>
#include <urcu-flavor.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <rculfhash-internal.h>
/*
 * Split-counters lazily update the global counter every 1024
 * additions/removals. They automatically keep track of when a resize is
 * required. We use the bucket length as an indicator of the need to
 * expand for small tables and for machines lacking per-cpu data support.
 */
#define COUNT_COMMIT_ORDER		10
#define DEFAULT_SPLIT_COUNT_MASK	0xFUL
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3
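
/*
 * Worked example of the automatic grow trigger (illustration only,
 * assuming the defaults above): a chain of length 4 observed during an
 * add exceeds CHAIN_LEN_RESIZE_THRESHOLD (3), so check_resize() requests
 * a growth of cds_lfht_get_count_order_u32(4 - (CHAIN_LEN_TARGET - 1))
 * == 2 orders, i.e. the target table size becomes size << 2.
 */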
/*
 * Define the minimum table size.
 */
#define MIN_TABLE_ORDER			0
#define MIN_TABLE_SIZE			(1UL << MIN_TABLE_ORDER)

/*
 * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
 */
#define MIN_PARTITION_PER_THREAD_ORDER	12
#define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)
/*
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no node must attach to the node scheduled for
 * removal, and that node garbage collection must be performed.
 * The bucket flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define BUCKET_FLAG		(1UL << 1)
#define REMOVAL_OWNER_FLAG	(1UL << 2)
#define FLAGS_MASK		((1UL << 3) - 1)
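
/*
 * Illustration only (not used by the algorithm below; the real helpers
 * such as clear_flag() and is_removed() are defined further down): how a
 * next pointer carrying the low-bit flags above can be decoded.
 */
static inline __attribute__((unused))
struct cds_lfht_node *example_decode_next(struct cds_lfht_node *next,
		int *removed)
{
	/* Test the "removed" low bit, then mask all flag bits away. */
	*removed = !!(((unsigned long) next) & REMOVED_FLAG);
	return (struct cds_lfht_node *) (((unsigned long) next) & ~FLAGS_MASK);
}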

/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE	NULL
/*
 * ht_items_count: Split-counters counting the number of node additions
 * and removals in the table. Only used if the CDS_LFHT_ACCOUNTING flag
 * is set at hash table creation.
 *
 * These are free-running counters, never reset to zero. They count the
 * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
 * operations to update the global counter. We choose a power-of-2 value
 * for the trigger to deal with 32 or 64-bit overflow of the counter.
 */
struct ht_items_count {
	unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
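
/*
 * Sketch of the lazy commit scheme implemented by ht_count_add() and
 * ht_count_del() below (illustration only): a per-counter increment
 * propagates to the global ht->count only once every
 * (1UL << COUNT_COMMIT_ORDER) operations, detected by the low bits of
 * the split counter wrapping around to zero:
 *
 *	split = uatomic_add_return(&ht->split_count[index].add, 1);
 *	if (!(split & ((1UL << COUNT_COMMIT_ORDER) - 1)))
 *		uatomic_add(&ht->count, 1UL << COUNT_COMMIT_ORDER);
 */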
/*
 * rcu_resize_work: Contains arguments passed to RCU worker thread
 * responsible for performing lazy resize.
 */
struct rcu_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
};
/*
 * partition_resize_work: Contains arguments passed to worker threads
 * executing the hash table resize on partitions of the hash table
 * assigned to each processor's worker thread.
 */
struct partition_resize_work {
	pthread_t thread_id;
	struct cds_lfht *ht;
	unsigned long i, start, len;
	void (*fct)(struct cds_lfht *ht, unsigned long i,
			unsigned long start, unsigned long len);
};
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}
static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}
static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
337 #if defined(__i386) || defined(__x86_64)
339 unsigned int fls_u32(uint32_t x
)
347 : "=r" (r
) : "rm" (x
));
353 #if defined(__x86_64)
355 unsigned int fls_u64(uint64_t x
)
363 : "=r" (r
) : "rm" (x
));
#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif
#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif
unsigned int cds_lfht_fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int cds_lfht_get_count_order_u32(uint32_t x)
{
	if (!x)
		return -1;
	return fls_u32(x - 1);
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int cds_lfht_get_count_order_ulong(unsigned long x)
{
	if (!x)
		return -1;
	return cds_lfht_fls_ulong(x - 1);
}
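
/*
 * Example values (illustration): cds_lfht_get_count_order_ulong(1) == 0,
 * cds_lfht_get_count_order_ulong(5) == 3 (since 5 <= (1UL << 3)), and
 * cds_lfht_get_count_order_ulong(8) == 3.
 */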
static
void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size,
		int growth);
static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
		unsigned long count);

static long nr_cpus_mask = -1;
static long split_count_mask = -1;
481 #if defined(HAVE_SYSCONF)
482 static void ht_init_nr_cpus_mask(void)
486 maxcpus
= sysconf(_SC_NPROCESSORS_CONF
);
492 * round up number of CPUs to next power of two, so we
493 * can use & for modulo.
495 maxcpus
= 1UL << cds_lfht_get_count_order_ulong(maxcpus
);
496 nr_cpus_mask
= maxcpus
- 1;
498 #else /* #if defined(HAVE_SYSCONF) */
499 static void ht_init_nr_cpus_mask(void)
503 #endif /* #else #if defined(HAVE_SYSCONF) */
506 void alloc_split_items_count(struct cds_lfht
*ht
)
508 struct ht_items_count
*count
;
510 if (nr_cpus_mask
== -1) {
511 ht_init_nr_cpus_mask();
512 if (nr_cpus_mask
< 0)
513 split_count_mask
= DEFAULT_SPLIT_COUNT_MASK
;
515 split_count_mask
= nr_cpus_mask
;
518 assert(split_count_mask
>= 0);
520 if (ht
->flags
& CDS_LFHT_ACCOUNTING
) {
521 ht
->split_count
= calloc(split_count_mask
+ 1, sizeof(*count
));
522 assert(ht
->split_count
);
524 ht
->split_count
= NULL
;
529 void free_split_items_count(struct cds_lfht
*ht
)
531 poison_free(ht
->split_count
);
534 #if defined(HAVE_SCHED_GETCPU)
536 int ht_get_split_count_index(unsigned long hash
)
540 assert(split_count_mask
>= 0);
541 cpu
= sched_getcpu();
542 if (caa_unlikely(cpu
< 0))
543 return hash
& split_count_mask
;
545 return cpu
& split_count_mask
;
547 #else /* #if defined(HAVE_SCHED_GETCPU) */
549 int ht_get_split_count_index(unsigned long hash
)
551 return hash
& split_count_mask
;
553 #endif /* #else #if defined(HAVE_SCHED_GETCPU) */
556 void ht_count_add(struct cds_lfht
*ht
, unsigned long size
, unsigned long hash
)
558 unsigned long split_count
;
562 if (caa_unlikely(!ht
->split_count
))
564 index
= ht_get_split_count_index(hash
);
565 split_count
= uatomic_add_return(&ht
->split_count
[index
].add
, 1);
566 if (caa_likely(split_count
& ((1UL << COUNT_COMMIT_ORDER
) - 1)))
	/* Only if the number of adds is a multiple of 1UL << COUNT_COMMIT_ORDER */
570 dbg_printf("add split count %lu\n", split_count
);
571 count
= uatomic_add_return(&ht
->count
,
572 1UL << COUNT_COMMIT_ORDER
);
573 if (caa_likely(count
& (count
- 1)))
575 /* Only if global count is power of 2 */
577 if ((count
>> CHAIN_LEN_RESIZE_THRESHOLD
) < size
)
579 dbg_printf("add set global %ld\n", count
);
580 cds_lfht_resize_lazy_count(ht
, size
,
581 count
>> (CHAIN_LEN_TARGET
- 1));
585 void ht_count_del(struct cds_lfht
*ht
, unsigned long size
, unsigned long hash
)
587 unsigned long split_count
;
591 if (caa_unlikely(!ht
->split_count
))
593 index
= ht_get_split_count_index(hash
);
594 split_count
= uatomic_add_return(&ht
->split_count
[index
].del
, 1);
595 if (caa_likely(split_count
& ((1UL << COUNT_COMMIT_ORDER
) - 1)))
	/* Only if the number of deletes is a multiple of 1UL << COUNT_COMMIT_ORDER */
599 dbg_printf("del split count %lu\n", split_count
);
600 count
= uatomic_add_return(&ht
->count
,
601 -(1UL << COUNT_COMMIT_ORDER
));
602 if (caa_likely(count
& (count
- 1)))
604 /* Only if global count is power of 2 */
606 if ((count
>> CHAIN_LEN_RESIZE_THRESHOLD
) >= size
)
608 dbg_printf("del set global %ld\n", count
);
			/*
			 * Don't shrink table if the number of nodes is below a
			 * certain threshold.
			 */
613 if (count
< (1UL << COUNT_COMMIT_ORDER
) * (split_count_mask
+ 1))
615 cds_lfht_resize_lazy_count(ht
, size
,
616 count
>> (CHAIN_LEN_TARGET
- 1));
620 void check_resize(struct cds_lfht
*ht
, unsigned long size
, uint32_t chain_len
)
624 if (!(ht
->flags
& CDS_LFHT_AUTO_RESIZE
))
626 count
= uatomic_read(&ht
->count
);
628 * Use bucket-local length for small table expand and for
629 * environments lacking per-cpu data support.
631 if (count
>= (1UL << COUNT_COMMIT_ORDER
))
634 dbg_printf("WARNING: large chain length: %u.\n",
636 if (chain_len
>= CHAIN_LEN_RESIZE_THRESHOLD
)
637 cds_lfht_resize_lazy_grow(ht
, size
,
638 cds_lfht_get_count_order_u32(chain_len
- (CHAIN_LEN_TARGET
- 1)));
static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_bucket(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & BUCKET_FLAG;
}

static
struct cds_lfht_node *flag_bucket(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
}

static
int is_removal_owner(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
}

static
struct cds_lfht_node *flag_removal_owner(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
}

static
struct cds_lfht_node *get_end(void)
{
	return (struct cds_lfht_node *) END_VALUE;
}

static
int is_end(struct cds_lfht_node *node)
{
	return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
}
696 unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr
,
699 unsigned long old1
, old2
;
701 old1
= uatomic_read(ptr
);
706 } while ((old1
= uatomic_cmpxchg(ptr
, old2
, v
)) != old2
);
711 void cds_lfht_alloc_bucket_table(struct cds_lfht
*ht
, unsigned long order
)
713 return ht
->mm
->alloc_bucket_table(ht
, order
);
/*
 * cds_lfht_free_bucket_table() should be called with decreasing order.
 * When cds_lfht_free_bucket_table(0) is called, it means the whole
 * lookup table is destroyed.
 */
722 void cds_lfht_free_bucket_table(struct cds_lfht
*ht
, unsigned long order
)
724 return ht
->mm
->free_bucket_table(ht
, order
);
728 struct cds_lfht_node
*bucket_at(struct cds_lfht
*ht
, unsigned long index
)
730 return ht
->bucket_at(ht
, index
);
734 struct cds_lfht_node
*lookup_bucket(struct cds_lfht
*ht
, unsigned long size
,
738 return bucket_at(ht
, hash
& (size
- 1));
742 * Remove all logically deleted nodes from a bucket up to a certain node key.
745 void _cds_lfht_gc_bucket(struct cds_lfht_node
*bucket
, struct cds_lfht_node
*node
)
747 struct cds_lfht_node
*iter_prev
, *iter
, *next
, *new_next
;
749 assert(!is_bucket(bucket
));
750 assert(!is_removed(bucket
));
751 assert(!is_bucket(node
));
752 assert(!is_removed(node
));
755 /* We can always skip the bucket node initially */
756 iter
= rcu_dereference(iter_prev
->next
);
757 assert(!is_removed(iter
));
758 assert(iter_prev
->reverse_hash
<= node
->reverse_hash
);
760 * We should never be called with bucket (start of chain)
761 * and logically removed node (end of path compression
762 * marker) being the actual same node. This would be a
763 * bug in the algorithm implementation.
765 assert(bucket
!= node
);
767 if (caa_unlikely(is_end(iter
)))
769 if (caa_likely(clear_flag(iter
)->reverse_hash
> node
->reverse_hash
))
771 next
= rcu_dereference(clear_flag(iter
)->next
);
772 if (caa_likely(is_removed(next
)))
774 iter_prev
= clear_flag(iter
);
777 assert(!is_removed(iter
));
779 new_next
= flag_bucket(clear_flag(next
));
781 new_next
= clear_flag(next
);
782 (void) uatomic_cmpxchg(&iter_prev
->next
, iter
, new_next
);
787 int _cds_lfht_replace(struct cds_lfht
*ht
, unsigned long size
,
788 struct cds_lfht_node
*old_node
,
789 struct cds_lfht_node
*old_next
,
790 struct cds_lfht_node
*new_node
)
792 struct cds_lfht_node
*bucket
, *ret_next
;
794 if (!old_node
) /* Return -ENOENT if asked to replace NULL node */
797 assert(!is_removed(old_node
));
798 assert(!is_bucket(old_node
));
799 assert(!is_removed(new_node
));
800 assert(!is_bucket(new_node
));
801 assert(new_node
!= old_node
);
803 /* Insert after node to be replaced */
804 if (is_removed(old_next
)) {
806 * Too late, the old node has been removed under us
807 * between lookup and replace. Fail.
811 assert(old_next
== clear_flag(old_next
));
812 assert(new_node
!= old_next
);
813 new_node
->next
= old_next
;
815 * Here is the whole trick for lock-free replace: we add
816 * the replacement node _after_ the node we want to
817 * replace by atomically setting its next pointer at the
818 * same time we set its removal flag. Given that
819 * the lookups/get next use an iterator aware of the
820 * next pointer, they will either skip the old node due
821 * to the removal flag and see the new node, or use
822 * the old node, but will not see the new one.
823 * This is a replacement of a node with another node
824 * that has the same value: we are therefore not
825 * removing a value from the hash table.
827 ret_next
= uatomic_cmpxchg(&old_node
->next
,
828 old_next
, flag_removed(new_node
));
829 if (ret_next
== old_next
)
830 break; /* We performed the replacement. */
835 * Ensure that the old node is not visible to readers anymore:
836 * lookup for the node, and remove it (along with any other
837 * logically removed node) if found.
839 bucket
= lookup_bucket(ht
, size
, bit_reverse_ulong(old_node
->reverse_hash
));
840 _cds_lfht_gc_bucket(bucket
, new_node
);
842 assert(is_removed(rcu_dereference(old_node
->next
)));
847 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
848 * mode. A NULL unique_ret allows creation of duplicate keys.
851 void _cds_lfht_add(struct cds_lfht
*ht
,
853 cds_lfht_match_fct match
,
856 struct cds_lfht_node
*node
,
857 struct cds_lfht_iter
*unique_ret
,
860 struct cds_lfht_node
*iter_prev
, *iter
, *next
, *new_node
, *new_next
,
862 struct cds_lfht_node
*bucket
;
864 assert(!is_bucket(node
));
865 assert(!is_removed(node
));
866 bucket
= lookup_bucket(ht
, size
, hash
);
868 uint32_t chain_len
= 0;
		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
875 /* We can always skip the bucket node initially */
876 iter
= rcu_dereference(iter_prev
->next
);
877 assert(iter_prev
->reverse_hash
<= node
->reverse_hash
);
879 if (caa_unlikely(is_end(iter
)))
881 if (caa_likely(clear_flag(iter
)->reverse_hash
> node
->reverse_hash
))
884 /* bucket node is the first node of the identical-hash-value chain */
885 if (bucket_flag
&& clear_flag(iter
)->reverse_hash
== node
->reverse_hash
)
888 next
= rcu_dereference(clear_flag(iter
)->next
);
889 if (caa_unlikely(is_removed(next
)))
895 && clear_flag(iter
)->reverse_hash
== node
->reverse_hash
) {
896 struct cds_lfht_iter d_iter
= { .node
= node
, .next
= iter
, };
			/*
			 * uniquely adding inserts the node as the first
			 * node of the identical-hash-value node chain.
			 *
			 * This semantic ensures no duplicated keys
			 * should ever be observable in the table
			 * (including when observing nodes one by one
			 * via forward iteration).
			 */
907 cds_lfht_next_duplicate(ht
, match
, key
, &d_iter
);
911 *unique_ret
= d_iter
;
915 /* Only account for identical reverse hash once */
916 if (iter_prev
->reverse_hash
!= clear_flag(iter
)->reverse_hash
918 check_resize(ht
, size
, ++chain_len
);
919 iter_prev
= clear_flag(iter
);
924 assert(node
!= clear_flag(iter
));
925 assert(!is_removed(iter_prev
));
926 assert(!is_removed(iter
));
927 assert(iter_prev
!= node
);
929 node
->next
= clear_flag(iter
);
931 node
->next
= flag_bucket(clear_flag(iter
));
933 new_node
= flag_bucket(node
);
936 if (uatomic_cmpxchg(&iter_prev
->next
, iter
,
938 continue; /* retry */
945 assert(!is_removed(iter
));
947 new_next
= flag_bucket(clear_flag(next
));
949 new_next
= clear_flag(next
);
950 (void) uatomic_cmpxchg(&iter_prev
->next
, iter
, new_next
);
955 unique_ret
->node
= return_node
;
956 /* unique_ret->next left unset, never used. */
961 int _cds_lfht_del(struct cds_lfht
*ht
, unsigned long size
,
962 struct cds_lfht_node
*node
)
964 struct cds_lfht_node
*bucket
, *next
;
966 if (!node
) /* Return -ENOENT if asked to delete NULL node */
969 /* logically delete the node */
970 assert(!is_bucket(node
));
971 assert(!is_removed(node
));
972 assert(!is_removal_owner(node
));
975 * We are first checking if the node had previously been
976 * logically removed (this check is not atomic with setting the
977 * logical removal flag). Return -ENOENT if the node had
978 * previously been removed.
980 next
= rcu_dereference(node
->next
);
981 if (caa_unlikely(is_removed(next
)))
983 assert(!is_bucket(next
));
985 * We set the REMOVED_FLAG unconditionally. Note that there may
986 * be more than one concurrent thread setting this flag.
987 * Knowing which wins the race will be known after the garbage
988 * collection phase, stay tuned!
990 uatomic_or(&node
->next
, REMOVED_FLAG
);
991 /* We performed the (logical) deletion. */
994 * Ensure that the node is not visible to readers anymore: lookup for
995 * the node, and remove it (along with any other logically removed node)
998 bucket
= lookup_bucket(ht
, size
, bit_reverse_ulong(node
->reverse_hash
));
999 _cds_lfht_gc_bucket(bucket
, node
);
1001 assert(is_removed(rcu_dereference(node
->next
)));
	/*
	 * Last phase: atomically exchange node->next with a version
	 * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
	 * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
	 * the node and win the removal race.
	 * It is interesting to note that all "add" paths are forbidden
	 * to change the next pointer starting from the point where the
	 * REMOVED_FLAG is set, so here a read followed by an xchg()
	 * suffices to guarantee that the xchg() will only ever set the
	 * "REMOVAL_OWNER_FLAG" (or change nothing if the flag was
	 * already set).
	 */
1014 if (!is_removal_owner(uatomic_xchg(&node
->next
,
1015 flag_removal_owner(node
->next
))))
1022 void *partition_resize_thread(void *arg
)
1024 struct partition_resize_work
*work
= arg
;
1026 work
->ht
->flavor
->register_thread();
1027 work
->fct(work
->ht
, work
->i
, work
->start
, work
->len
);
1028 work
->ht
->flavor
->unregister_thread();
1033 void partition_resize_helper(struct cds_lfht
*ht
, unsigned long i
,
1035 void (*fct
)(struct cds_lfht
*ht
, unsigned long i
,
1036 unsigned long start
, unsigned long len
))
1038 unsigned long partition_len
;
1039 struct partition_resize_work
*work
;
1041 unsigned long nr_threads
;
	 * Note: nr_cpus_mask + 1 is always a power of 2.
1045 * We spawn just the number of threads we need to satisfy the minimum
1046 * partition size, up to the number of CPUs in the system.
1048 if (nr_cpus_mask
> 0) {
1049 nr_threads
= min(nr_cpus_mask
+ 1,
1050 len
>> MIN_PARTITION_PER_THREAD_ORDER
);
1054 partition_len
= len
>> cds_lfht_get_count_order_ulong(nr_threads
);
1055 work
= calloc(nr_threads
, sizeof(*work
));
1057 for (thread
= 0; thread
< nr_threads
; thread
++) {
1058 work
[thread
].ht
= ht
;
1060 work
[thread
].len
= partition_len
;
1061 work
[thread
].start
= thread
* partition_len
;
1062 work
[thread
].fct
= fct
;
1063 ret
= pthread_create(&(work
[thread
].thread_id
), ht
->resize_attr
,
1064 partition_resize_thread
, &work
[thread
]);
1067 for (thread
= 0; thread
< nr_threads
; thread
++) {
1068 ret
= pthread_join(work
[thread
].thread_id
, NULL
);
/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 *
 * When we reach a certain length, we can split this population phase over
 * many worker threads, based on the number of CPUs available in the system.
 * This should therefore take care of not having the expansion lag behind too
 * many concurrent insertion threads, by using the scheduler's ability to
 * schedule bucket node population fairly with insertions.
 */
1086 void init_table_populate_partition(struct cds_lfht
*ht
, unsigned long i
,
1087 unsigned long start
, unsigned long len
)
1089 unsigned long j
, size
= 1UL << (i
- 1);
1091 assert(i
> MIN_TABLE_ORDER
);
1092 ht
->flavor
->read_lock();
1093 for (j
= size
+ start
; j
< size
+ start
+ len
; j
++) {
1094 struct cds_lfht_node
*new_node
= bucket_at(ht
, j
);
1096 assert(j
>= size
&& j
< (size
<< 1));
1097 dbg_printf("init populate: order %lu index %lu hash %lu\n",
1099 new_node
->reverse_hash
= bit_reverse_ulong(j
);
1100 _cds_lfht_add(ht
, j
, NULL
, NULL
, size
, new_node
, NULL
, 1);
1102 ht
->flavor
->read_unlock();
1106 void init_table_populate(struct cds_lfht
*ht
, unsigned long i
,
1109 assert(nr_cpus_mask
!= -1);
1110 if (nr_cpus_mask
< 0 || len
< 2 * MIN_PARTITION_PER_THREAD
) {
1111 ht
->flavor
->thread_online();
1112 init_table_populate_partition(ht
, i
, 0, len
);
1113 ht
->flavor
->thread_offline();
1116 partition_resize_helper(ht
, i
, len
, init_table_populate_partition
);
1120 void init_table(struct cds_lfht
*ht
,
1121 unsigned long first_order
, unsigned long last_order
)
1125 dbg_printf("init table: first_order %lu last_order %lu\n",
1126 first_order
, last_order
);
1127 assert(first_order
> MIN_TABLE_ORDER
);
1128 for (i
= first_order
; i
<= last_order
; i
++) {
1131 len
= 1UL << (i
- 1);
1132 dbg_printf("init order %lu len: %lu\n", i
, len
);
1134 /* Stop expand if the resize target changes under us */
1135 if (CMM_LOAD_SHARED(ht
->resize_target
) < (1UL << i
))
1138 cds_lfht_alloc_bucket_table(ht
, i
);
1141 * Set all bucket nodes reverse hash values for a level and
1142 * link all bucket nodes into the table.
1144 init_table_populate(ht
, i
, len
);
1147 * Update table size.
1149 cmm_smp_wmb(); /* populate data before RCU size */
1150 CMM_STORE_SHARED(ht
->size
, 1UL << i
);
1152 dbg_printf("init new size: %lu\n", 1UL << i
);
1153 if (CMM_LOAD_SHARED(ht
->in_progress_destroy
))
/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 * For a single level, we logically remove and garbage collect each node.
 *
 * As a design choice, we perform logical removal and garbage collection on a
 * node-per-node basis to simplify this algorithm. We also assume keeping good
 * cache locality of the operation would outweigh the possible performance gain
 * that could be achieved by batching garbage collection for multiple levels.
 * However, this would have to be justified by benchmarks.
 *
 * Concurrent removal and add operations are helping us perform garbage
 * collection of logically removed nodes. We guarantee that all logically
 * removed nodes have been garbage-collected (unlinked) before call_rcu is
 * invoked to free a whole level of bucket nodes (after a grace period).
 *
 * Logical removal and garbage collection can therefore be done in batch
 * or on a node-per-node basis, as long as the guarantee above holds.
 *
 * When we reach a certain length, we can split this removal over many worker
 * threads, based on the number of CPUs available in the system. This should
 * take care of not letting the resize process lag behind too many concurrent
 * updater threads actively inserting into the hash table.
 */
1184 void remove_table_partition(struct cds_lfht
*ht
, unsigned long i
,
1185 unsigned long start
, unsigned long len
)
1187 unsigned long j
, size
= 1UL << (i
- 1);
1189 assert(i
> MIN_TABLE_ORDER
);
1190 ht
->flavor
->read_lock();
1191 for (j
= size
+ start
; j
< size
+ start
+ len
; j
++) {
1192 struct cds_lfht_node
*fini_bucket
= bucket_at(ht
, j
);
1193 struct cds_lfht_node
*parent_bucket
= bucket_at(ht
, j
- size
);
1195 assert(j
>= size
&& j
< (size
<< 1));
1196 dbg_printf("remove entry: order %lu index %lu hash %lu\n",
1198 /* Set the REMOVED_FLAG to freeze the ->next for gc */
1199 uatomic_or(&fini_bucket
->next
, REMOVED_FLAG
);
1200 _cds_lfht_gc_bucket(parent_bucket
, fini_bucket
);
1202 ht
->flavor
->read_unlock();
1206 void remove_table(struct cds_lfht
*ht
, unsigned long i
, unsigned long len
)
1209 assert(nr_cpus_mask
!= -1);
1210 if (nr_cpus_mask
< 0 || len
< 2 * MIN_PARTITION_PER_THREAD
) {
1211 ht
->flavor
->thread_online();
1212 remove_table_partition(ht
, i
, 0, len
);
1213 ht
->flavor
->thread_offline();
1216 partition_resize_helper(ht
, i
, len
, remove_table_partition
);
/*
 * fini_table() is never called for first_order == 0, which is why
 * free_by_rcu_order == 0 can be used as criterion to know if free must
 * be called.
 */
1225 void fini_table(struct cds_lfht
*ht
,
1226 unsigned long first_order
, unsigned long last_order
)
1229 unsigned long free_by_rcu_order
= 0;
1231 dbg_printf("fini table: first_order %lu last_order %lu\n",
1232 first_order
, last_order
);
1233 assert(first_order
> MIN_TABLE_ORDER
);
1234 for (i
= last_order
; i
>= first_order
; i
--) {
1237 len
= 1UL << (i
- 1);
1238 dbg_printf("fini order %lu len: %lu\n", i
, len
);
1240 /* Stop shrink if the resize target changes under us */
1241 if (CMM_LOAD_SHARED(ht
->resize_target
) > (1UL << (i
- 1)))
1244 cmm_smp_wmb(); /* populate data before RCU size */
1245 CMM_STORE_SHARED(ht
->size
, 1UL << (i
- 1));
1248 * We need to wait for all add operations to reach Q.S. (and
1249 * thus use the new table for lookups) before we can start
1250 * releasing the old bucket nodes. Otherwise their lookup will
1251 * return a logically removed node as insert position.
1253 ht
->flavor
->update_synchronize_rcu();
1254 if (free_by_rcu_order
)
1255 cds_lfht_free_bucket_table(ht
, free_by_rcu_order
);
		/*
		 * Set "removed" flag in bucket nodes about to be removed.
		 * Unlink all now-logically-removed bucket node pointers.
		 * Concurrent add/remove operations help us perform this
		 * unlink.
		 */
1263 remove_table(ht
, i
, len
);
1265 free_by_rcu_order
= i
;
1267 dbg_printf("fini new size: %lu\n", 1UL << i
);
1268 if (CMM_LOAD_SHARED(ht
->in_progress_destroy
))
1272 if (free_by_rcu_order
) {
1273 ht
->flavor
->update_synchronize_rcu();
1274 cds_lfht_free_bucket_table(ht
, free_by_rcu_order
);
1279 void cds_lfht_create_bucket(struct cds_lfht
*ht
, unsigned long size
)
1281 struct cds_lfht_node
*prev
, *node
;
1282 unsigned long order
, len
, i
;
1284 cds_lfht_alloc_bucket_table(ht
, 0);
1286 dbg_printf("create bucket: order 0 index 0 hash 0\n");
1287 node
= bucket_at(ht
, 0);
1288 node
->next
= flag_bucket(get_end());
1289 node
->reverse_hash
= 0;
1291 for (order
= 1; order
< cds_lfht_get_count_order_ulong(size
) + 1; order
++) {
1292 len
= 1UL << (order
- 1);
1293 cds_lfht_alloc_bucket_table(ht
, order
);
1295 for (i
= 0; i
< len
; i
++) {
			/*
			 * Now, we are trying to init the node with the
			 * hash=(len+i) (which is also a bucket with the
			 * index=(len+i)) and insert it into the hash table,
			 * so this node has to be inserted after the bucket
			 * with the index=(len+i)&(len-1)=i. And because no
			 * other non-bucket node nor bucket node with a larger
			 * index/hash has been inserted yet, the bucket node
			 * being inserted can be linked directly after the
			 * bucket node with index=i.
			 */
1307 prev
= bucket_at(ht
, i
);
1308 node
= bucket_at(ht
, len
+ i
);
1310 dbg_printf("create bucket: order %lu index %lu hash %lu\n",
1311 order
, len
+ i
, len
+ i
);
1312 node
->reverse_hash
= bit_reverse_ulong(len
+ i
);
1314 /* insert after prev */
1315 assert(is_bucket(prev
->next
));
1316 node
->next
= prev
->next
;
1317 prev
->next
= flag_bucket(node
);
1322 struct cds_lfht
*_cds_lfht_new(unsigned long init_size
,
1323 unsigned long min_nr_alloc_buckets
,
1324 unsigned long max_nr_buckets
,
1326 const struct cds_lfht_mm_type
*mm
,
1327 const struct rcu_flavor_struct
*flavor
,
1328 pthread_attr_t
*attr
)
1330 struct cds_lfht
*ht
;
1331 unsigned long order
;
1333 /* min_nr_alloc_buckets must be power of two */
1334 if (!min_nr_alloc_buckets
|| (min_nr_alloc_buckets
& (min_nr_alloc_buckets
- 1)))
1337 /* init_size must be power of two */
1338 if (!init_size
|| (init_size
& (init_size
- 1)))
1342 * Memory management plugin default.
1345 if (CAA_BITS_PER_LONG
> 32
1347 && max_nr_buckets
<= (1ULL << 32)) {
			/*
			 * For 64-bit architectures, with max number of
			 * buckets small enough not to use the entire
			 * 64-bit memory mapping space (and allowing a
			 * fair number of hash table instances), use the
			 * mmap allocator, which is faster than the
			 * order allocator.
			 */
= &cds_lfht_mm_mmap
;
1359 * The fallback is to use the order allocator.
1361 mm
= &cds_lfht_mm_order
;
1365 /* max_nr_buckets == 0 for order based mm means infinite */
1366 if (mm
== &cds_lfht_mm_order
&& !max_nr_buckets
)
1367 max_nr_buckets
= 1UL << (MAX_TABLE_ORDER
- 1);
1369 /* max_nr_buckets must be power of two */
1370 if (!max_nr_buckets
|| (max_nr_buckets
& (max_nr_buckets
- 1)))
1373 min_nr_alloc_buckets
= max(min_nr_alloc_buckets
, MIN_TABLE_SIZE
);
1374 init_size
= max(init_size
, MIN_TABLE_SIZE
);
1375 max_nr_buckets
= max(max_nr_buckets
, min_nr_alloc_buckets
);
1376 init_size
= min(init_size
, max_nr_buckets
);
1378 ht
= mm
->alloc_cds_lfht(min_nr_alloc_buckets
, max_nr_buckets
);
1380 assert(ht
->mm
== mm
);
1381 assert(ht
->bucket_at
== mm
->bucket_at
);
1384 ht
->flavor
= flavor
;
1385 ht
->resize_attr
= attr
;
1386 alloc_split_items_count(ht
);
1387 /* this mutex should not nest in read-side C.S. */
1388 pthread_mutex_init(&ht
->resize_mutex
, NULL
);
1389 order
= cds_lfht_get_count_order_ulong(init_size
);
1390 ht
->resize_target
= 1UL << order
;
1391 cds_lfht_create_bucket(ht
, 1UL << order
);
1392 ht
->size
= 1UL << order
;
1396 void cds_lfht_lookup(struct cds_lfht
*ht
, unsigned long hash
,
1397 cds_lfht_match_fct match
, const void *key
,
1398 struct cds_lfht_iter
*iter
)
1400 struct cds_lfht_node
*node
, *next
, *bucket
;
1401 unsigned long reverse_hash
, size
;
1403 reverse_hash
= bit_reverse_ulong(hash
);
1405 size
= rcu_dereference(ht
->size
);
1406 bucket
= lookup_bucket(ht
, size
, hash
);
1407 /* We can always skip the bucket node initially */
1408 node
= rcu_dereference(bucket
->next
);
1409 node
= clear_flag(node
);
1411 if (caa_unlikely(is_end(node
))) {
1415 if (caa_unlikely(node
->reverse_hash
> reverse_hash
)) {
1419 next
= rcu_dereference(node
->next
);
1420 assert(node
== clear_flag(node
));
1421 if (caa_likely(!is_removed(next
))
1423 && node
->reverse_hash
== reverse_hash
1424 && caa_likely(match(node
, key
))) {
1427 node
= clear_flag(next
);
1429 assert(!node
|| !is_bucket(rcu_dereference(node
->next
)));
1434 void cds_lfht_next_duplicate(struct cds_lfht
*ht
, cds_lfht_match_fct match
,
1435 const void *key
, struct cds_lfht_iter
*iter
)
1437 struct cds_lfht_node
*node
, *next
;
1438 unsigned long reverse_hash
;
1441 reverse_hash
= node
->reverse_hash
;
1443 node
= clear_flag(next
);
1446 if (caa_unlikely(is_end(node
))) {
1450 if (caa_unlikely(node
->reverse_hash
> reverse_hash
)) {
1454 next
= rcu_dereference(node
->next
);
1455 if (caa_likely(!is_removed(next
))
1457 && caa_likely(match(node
, key
))) {
1460 node
= clear_flag(next
);
1462 assert(!node
|| !is_bucket(rcu_dereference(node
->next
)));
1467 void cds_lfht_next(struct cds_lfht
*ht
, struct cds_lfht_iter
*iter
)
1469 struct cds_lfht_node
*node
, *next
;
1471 node
= clear_flag(iter
->next
);
1473 if (caa_unlikely(is_end(node
))) {
1477 next
= rcu_dereference(node
->next
);
1478 if (caa_likely(!is_removed(next
))
1479 && !is_bucket(next
)) {
1482 node
= clear_flag(next
);
1484 assert(!node
|| !is_bucket(rcu_dereference(node
->next
)));
1489 void cds_lfht_first(struct cds_lfht
*ht
, struct cds_lfht_iter
*iter
)
1492 * Get next after first bucket node. The first bucket node is the
1493 * first node of the linked list.
1495 iter
->next
= bucket_at(ht
, 0)->next
;
1496 cds_lfht_next(ht
, iter
);
1499 void cds_lfht_add(struct cds_lfht
*ht
, unsigned long hash
,
1500 struct cds_lfht_node
*node
)
1504 node
->reverse_hash
= bit_reverse_ulong(hash
);
1505 size
= rcu_dereference(ht
->size
);
1506 _cds_lfht_add(ht
, hash
, NULL
, NULL
, size
, node
, NULL
, 0);
1507 ht_count_add(ht
, size
, hash
);
1510 struct cds_lfht_node
*cds_lfht_add_unique(struct cds_lfht
*ht
,
1512 cds_lfht_match_fct match
,
1514 struct cds_lfht_node
*node
)
1517 struct cds_lfht_iter iter
;
1519 node
->reverse_hash
= bit_reverse_ulong(hash
);
1520 size
= rcu_dereference(ht
->size
);
1521 _cds_lfht_add(ht
, hash
, match
, key
, size
, node
, &iter
, 0);
1522 if (iter
.node
== node
)
1523 ht_count_add(ht
, size
, hash
);
1527 struct cds_lfht_node
*cds_lfht_add_replace(struct cds_lfht
*ht
,
1529 cds_lfht_match_fct match
,
1531 struct cds_lfht_node
*node
)
1534 struct cds_lfht_iter iter
;
1536 node
->reverse_hash
= bit_reverse_ulong(hash
);
1537 size
= rcu_dereference(ht
->size
);
1539 _cds_lfht_add(ht
, hash
, match
, key
, size
, node
, &iter
, 0);
1540 if (iter
.node
== node
) {
1541 ht_count_add(ht
, size
, hash
);
1545 if (!_cds_lfht_replace(ht
, size
, iter
.node
, iter
.next
, node
))
1550 int cds_lfht_replace(struct cds_lfht
*ht
,
1551 struct cds_lfht_iter
*old_iter
,
1553 cds_lfht_match_fct match
,
1555 struct cds_lfht_node
*new_node
)
1559 new_node
->reverse_hash
= bit_reverse_ulong(hash
);
1560 if (!old_iter
->node
)
1562 if (caa_unlikely(old_iter
->node
->reverse_hash
!= new_node
->reverse_hash
))
1564 if (caa_unlikely(!match(old_iter
->node
, key
)))
1566 size
= rcu_dereference(ht
->size
);
1567 return _cds_lfht_replace(ht
, size
, old_iter
->node
, old_iter
->next
,
1571 int cds_lfht_del(struct cds_lfht
*ht
, struct cds_lfht_node
*node
)
1573 unsigned long size
, hash
;
1576 size
= rcu_dereference(ht
->size
);
1577 ret
= _cds_lfht_del(ht
, size
, node
);
1579 hash
= bit_reverse_ulong(node
->reverse_hash
);
1580 ht_count_del(ht
, size
, hash
);
1585 int cds_lfht_is_node_deleted(struct cds_lfht_node
*node
)
1587 return is_removed(rcu_dereference(node
->next
));
1591 int cds_lfht_delete_bucket(struct cds_lfht
*ht
)
1593 struct cds_lfht_node
*node
;
1594 unsigned long order
, i
, size
;
1596 /* Check that the table is empty */
1597 node
= bucket_at(ht
, 0);
1599 node
= clear_flag(node
)->next
;
1600 if (!is_bucket(node
))
1602 assert(!is_removed(node
));
1603 } while (!is_end(node
));
	/*
	 * size accessed without rcu_dereference because hash table is
	 * being destroyed.
	 */
	size = ht->size;
1609 /* Internal sanity check: all nodes left should be bucket */
1610 for (i
= 0; i
< size
; i
++) {
1611 node
= bucket_at(ht
, i
);
1612 dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
1613 i
, i
, bit_reverse_ulong(node
->reverse_hash
));
1614 assert(is_bucket(node
->next
));
1617 for (order
= cds_lfht_get_count_order_ulong(size
); (long)order
>= 0; order
--)
1618 cds_lfht_free_bucket_table(ht
, order
);
1624 * Should only be called when no more concurrent readers nor writers can
1625 * possibly access the table.
1627 int cds_lfht_destroy(struct cds_lfht
*ht
, pthread_attr_t
**attr
)
1631 /* Wait for in-flight resize operations to complete */
1632 _CMM_STORE_SHARED(ht
->in_progress_destroy
, 1);
1633 cmm_smp_mb(); /* Store destroy before load resize */
1634 while (uatomic_read(&ht
->in_progress_resize
))
1635 poll(NULL
, 0, 100); /* wait for 100ms */
1636 ret
= cds_lfht_delete_bucket(ht
);
1639 free_split_items_count(ht
);
1641 *attr
= ht
->resize_attr
;
1646 void cds_lfht_count_nodes(struct cds_lfht
*ht
,
1647 long *approx_before
,
1648 unsigned long *count
,
1651 struct cds_lfht_node
*node
, *next
;
1652 unsigned long nr_bucket
= 0, nr_removed
= 0;
1655 if (ht
->split_count
) {
1658 for (i
= 0; i
< split_count_mask
+ 1; i
++) {
1659 *approx_before
+= uatomic_read(&ht
->split_count
[i
].add
);
1660 *approx_before
-= uatomic_read(&ht
->split_count
[i
].del
);
1666 /* Count non-bucket nodes in the table */
1667 node
= bucket_at(ht
, 0);
1669 next
= rcu_dereference(node
->next
);
1670 if (is_removed(next
)) {
1671 if (!is_bucket(next
))
1675 } else if (!is_bucket(next
))
1679 node
= clear_flag(next
);
1680 } while (!is_end(node
));
1681 dbg_printf("number of logically removed nodes: %lu\n", nr_removed
);
1682 dbg_printf("number of bucket nodes: %lu\n", nr_bucket
);
1684 if (ht
->split_count
) {
1687 for (i
= 0; i
< split_count_mask
+ 1; i
++) {
1688 *approx_after
+= uatomic_read(&ht
->split_count
[i
].add
);
1689 *approx_after
-= uatomic_read(&ht
->split_count
[i
].del
);
1694 /* called with resize mutex held */
1696 void _do_cds_lfht_grow(struct cds_lfht
*ht
,
1697 unsigned long old_size
, unsigned long new_size
)
1699 unsigned long old_order
, new_order
;
1701 old_order
= cds_lfht_get_count_order_ulong(old_size
);
1702 new_order
= cds_lfht_get_count_order_ulong(new_size
);
1703 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1704 old_size
, old_order
, new_size
, new_order
);
1705 assert(new_size
> old_size
);
1706 init_table(ht
, old_order
+ 1, new_order
);
1709 /* called with resize mutex held */
1711 void _do_cds_lfht_shrink(struct cds_lfht
*ht
,
1712 unsigned long old_size
, unsigned long new_size
)
1714 unsigned long old_order
, new_order
;
1716 new_size
= max(new_size
, MIN_TABLE_SIZE
);
1717 old_order
= cds_lfht_get_count_order_ulong(old_size
);
1718 new_order
= cds_lfht_get_count_order_ulong(new_size
);
1719 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1720 old_size
, old_order
, new_size
, new_order
);
1721 assert(new_size
< old_size
);
1723 /* Remove and unlink all bucket nodes to remove. */
1724 fini_table(ht
, new_order
+ 1, old_order
);
1728 /* called with resize mutex held */
1730 void _do_cds_lfht_resize(struct cds_lfht
*ht
)
1732 unsigned long new_size
, old_size
;
1735 * Resize table, re-do if the target size has changed under us.
1738 assert(uatomic_read(&ht
->in_progress_resize
));
1739 if (CMM_LOAD_SHARED(ht
->in_progress_destroy
))
1741 ht
->resize_initiated
= 1;
1742 old_size
= ht
->size
;
1743 new_size
= CMM_LOAD_SHARED(ht
->resize_target
);
1744 if (old_size
< new_size
)
1745 _do_cds_lfht_grow(ht
, old_size
, new_size
);
1746 else if (old_size
> new_size
)
1747 _do_cds_lfht_shrink(ht
, old_size
, new_size
);
1748 ht
->resize_initiated
= 0;
1749 /* write resize_initiated before read resize_target */
1751 } while (ht
->size
!= CMM_LOAD_SHARED(ht
->resize_target
));
1755 unsigned long resize_target_grow(struct cds_lfht
*ht
, unsigned long new_size
)
1757 return _uatomic_xchg_monotonic_increase(&ht
->resize_target
, new_size
);
1761 void resize_target_update_count(struct cds_lfht
*ht
,
1762 unsigned long count
)
1764 count
= max(count
, MIN_TABLE_SIZE
);
1765 count
= min(count
, ht
->max_nr_buckets
);
1766 uatomic_set(&ht
->resize_target
, count
);
1769 void cds_lfht_resize(struct cds_lfht
*ht
, unsigned long new_size
)
1771 resize_target_update_count(ht
, new_size
);
1772 CMM_STORE_SHARED(ht
->resize_initiated
, 1);
1773 ht
->flavor
->thread_offline();
1774 pthread_mutex_lock(&ht
->resize_mutex
);
1775 _do_cds_lfht_resize(ht
);
1776 pthread_mutex_unlock(&ht
->resize_mutex
);
1777 ht
->flavor
->thread_online();
1781 void do_resize_cb(struct rcu_head
*head
)
1783 struct rcu_resize_work
*work
=
1784 caa_container_of(head
, struct rcu_resize_work
, head
);
1785 struct cds_lfht
*ht
= work
->ht
;
1787 ht
->flavor
->thread_offline();
1788 pthread_mutex_lock(&ht
->resize_mutex
);
1789 _do_cds_lfht_resize(ht
);
1790 pthread_mutex_unlock(&ht
->resize_mutex
);
1791 ht
->flavor
->thread_online();
1793 cmm_smp_mb(); /* finish resize before decrement */
1794 uatomic_dec(&ht
->in_progress_resize
);
1798 void __cds_lfht_resize_lazy_launch(struct cds_lfht
*ht
)
1800 struct rcu_resize_work
*work
;
1802 /* Store resize_target before read resize_initiated */
1804 if (!CMM_LOAD_SHARED(ht
->resize_initiated
)) {
1805 uatomic_inc(&ht
->in_progress_resize
);
1806 cmm_smp_mb(); /* increment resize count before load destroy */
1807 if (CMM_LOAD_SHARED(ht
->in_progress_destroy
)) {
1808 uatomic_dec(&ht
->in_progress_resize
);
1811 work
= malloc(sizeof(*work
));
1813 dbg_printf("error allocating resize work, bailing out\n");
1814 uatomic_dec(&ht
->in_progress_resize
);
1818 ht
->flavor
->update_call_rcu(&work
->head
, do_resize_cb
);
1819 CMM_STORE_SHARED(ht
->resize_initiated
, 1);
1824 void cds_lfht_resize_lazy_grow(struct cds_lfht
*ht
, unsigned long size
, int growth
)
1826 unsigned long target_size
= size
<< growth
;
1828 target_size
= min(target_size
, ht
->max_nr_buckets
);
1829 if (resize_target_grow(ht
, target_size
) >= target_size
)
1832 __cds_lfht_resize_lazy_launch(ht
);
1836 * We favor grow operations over shrink. A shrink operation never occurs
1837 * if a grow operation is queued for lazy execution. A grow operation
1838 * cancels any pending shrink lazy execution.
1841 void cds_lfht_resize_lazy_count(struct cds_lfht
*ht
, unsigned long size
,
1842 unsigned long count
)
1844 if (!(ht
->flags
& CDS_LFHT_AUTO_RESIZE
))
1846 count
= max(count
, MIN_TABLE_SIZE
);
1847 count
= min(count
, ht
->max_nr_buckets
);
1849 return; /* Already the right size, no resize needed */
1850 if (count
> size
) { /* lazy grow */
1851 if (resize_target_grow(ht
, count
) >= count
)
1853 } else { /* lazy shrink */
1857 s
= uatomic_cmpxchg(&ht
->resize_target
, size
, count
);
1859 break; /* no resize needed */
1861 return; /* growing is/(was just) in progress */
			return;	/* some other thread does the shrink */
1867 __cds_lfht_resize_lazy_launch(ht
);