/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/*
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical section allows readers to perform hash
 *   table lookups, as well as traversals, and use the returned objects
 *   safely by allowing memory reclaim to take place only after a grace
 *   period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures not to
 *   populate a duplicate key if the node key already exists in the hash
 *   table.
 * - The resize operation executes concurrently with
 *   add/add_unique/add_replace/remove/lookup/traversal.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of bucket nodes is kept. These bucket nodes are the hash
 *   table "buckets". These buckets are internal nodes that allow
 *   performing a fast hash lookup, similarly to a skip list. These
 *   buckets are chained together in the split-ordered list, which
 *   allows recursive expansion by inserting new buckets between the
 *   existing buckets as the table needs to grow.
 * - The resize operation for small tables only allows expanding the
 *   hash table. It is triggered automatically by detecting long chains
 *   in the add operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - Resize operation initiated by long chain detection is executed by a
 *   call_rcu thread, which keeps lock-freedom of add and remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. After setting the "removal" flag,
 *   only the thread whose removal is the first to set the "removal
 *   owner" flag (with an xchg) into a node's next pointer is considered
 *   to have succeeded its removal (and thus owns the node to reclaim).
 *   Because we garbage-collect starting from an invariant node (the
 *   start-of-bucket bucket node) up to the "removed" node (or find a
 *   reverse-hash that is higher), we are sure that a successful
 *   traversal of the chain leads to a chain that is present in the
 *   linked-list (the start node is never removed) and that it does not
 *   contain the "removed" node anymore, even if concurrent delete/add
 *   operations are changing the structure of the list concurrently.
 * - The add operations perform garbage collection of buckets if they
 *   encounter nodes with removed flag set in the bucket where they want
 *   to add their new node. This ensures lock-freedom of the add
 *   operation by helping the remover unlink nodes from the list rather
 *   than waiting for its completion.
 * - There are three memory backends for the hash table buckets: the
 *   "order table", the "chunks", and the "mmap".
 * - These bucket containers contain a compact version of the hash table
 *   nodes.
 * - The RCU "order table":
 *   - has a first level table indexed by log2(hash index) which is
 *     copied and expanded by the resize operation. This order table
 *     allows finding the "bucket node" tables.
 *   - There is one bucket node table per hash index order. The size of
 *     each bucket node table is half the number of hashes contained in
 *     this order (except for order 0).
 * - The RCU "chunks" backend is best suited for close interaction with
 *   a page allocator. It uses a linear array as index to "chunks",
 *   each containing the same number of buckets.
 * - The RCU "mmap" memory backend uses a single memory map to hold
 *   all buckets.
 * - synchronize_rcu is used to garbage-collect the old bucket node table.
 *
 * Ordering Guarantees:
 *
 * To discuss these guarantees, we first define "read" operation as any
 * of the basic cds_lfht_lookup, cds_lfht_next_duplicate,
 * cds_lfht_first, cds_lfht_next operation, as well as
 * cds_lfht_add_unique (failure).
 *
 * We define "read traversal" operation as any of the following
 * group of operations
 * - cds_lfht_lookup followed by iteration with cds_lfht_next_duplicate
 *   (and/or cds_lfht_next, although less common).
 * - cds_lfht_add_unique (failure) followed by iteration with
 *   cds_lfht_next_duplicate (and/or cds_lfht_next, although less
 *   common).
 * - cds_lfht_first followed by iteration with cds_lfht_next (and/or
 *   cds_lfht_next_duplicate, although less common).
 *
 * We define "write" operations as any of cds_lfht_add,
 * cds_lfht_add_unique (success), cds_lfht_add_replace, cds_lfht_del.
 *
 * When cds_lfht_add_unique succeeds (returns the node passed as
 * parameter), it acts as a "write" operation. When cds_lfht_add_unique
 * fails (returns a node different from the one passed as parameter), it
 * acts as a "read" operation. A cds_lfht_add_unique failure is a
 * cds_lfht_lookup "read" operation, therefore, any ordering guarantee
 * referring to "lookup" implies any of "lookup" or cds_lfht_add_unique
 * (failure).
 *
 * We define "prior" and "later" node as nodes observable by reads and
 * read traversals respectively before and after a write or sequence of
 * write operations.
 *
 * Hash-table operations are often cascaded, for example, the pointer
 * returned by a cds_lfht_lookup() might be passed to a cds_lfht_next(),
 * whose return value might in turn be passed to another hash-table
 * operation. This entire cascaded series of operations must be enclosed
 * by a pair of matching rcu_read_lock() and rcu_read_unlock()
 * operations, as sketched below.
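 *
 * A minimal sketch of such a cascaded read traversal (illustrative
 * only; the `match' function, `hash' value and the mynode structure
 * embedding the cds_lfht_node are application-side assumptions):
 *
 *	struct cds_lfht_iter iter;
 *	struct cds_lfht_node *node;
 *
 *	rcu_read_lock();
 *	cds_lfht_lookup(ht, hash, match, key, &iter);
 *	node = cds_lfht_iter_get_node(&iter);
 *	while (node) {
 *		// e.g. caa_container_of(node, struct mynode, lfht_node)
 *		cds_lfht_next_duplicate(ht, match, key, &iter);
 *		node = cds_lfht_iter_get_node(&iter);
 *	}
 *	rcu_read_unlock();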
 *
 * The following ordering guarantees are offered by this hash table:
 *
 * A.1) "read" after "write": if there is ordering between a write and a
 *      later read, then the read is guaranteed to see the write or some
 *      later write.
 * A.2) "read traversal" after "write": given that there is dependency
 *      ordering between reads in a "read traversal", if there is
 *      ordering between a write and the first read of the traversal,
 *      then the "read traversal" is guaranteed to see the write or
 *      some later write.
 * B.1) "write" after "read": if there is ordering between a read and a
 *      later write, then the read will never see the write.
 * B.2) "write" after "read traversal": given that there is dependency
 *      ordering between reads in a "read traversal", if there is
 *      ordering between the last read of the traversal and a later
 *      write, then the "read traversal" will never see the write.
 * C)   "write" while "read traversal": if a write occurs during a "read
 *      traversal", the traversal may, or may not, see the write.
 * D.1) "write" after "write": if there is ordering between a write and
 *      a later write, then the later write is guaranteed to see the
 *      effects of the first write.
 * D.2) Concurrent "write" pairs: The system will assign an arbitrary
 *      order to any pair of concurrent conflicting writes.
 *      Non-conflicting writes (for example, to different keys) are
 *      unordered.
 * E)   If a grace period separates a "del" or "replace" operation
 *      and a subsequent operation, then that subsequent operation is
 *      guaranteed not to see the removed item.
 * F)   Uniqueness guarantee: given a hash table that does not contain
 *      duplicate items for a given key, there will only be one item in
 *      the hash table after an arbitrary sequence of add_unique and/or
 *      add_replace operations. Note, however, that a pair of
 *      concurrent read operations might well access two different items
 *      with that key.
 * G.1) If a pair of lookups for a given key are ordered (e.g. by a
 *      memory barrier), then the second lookup will return the same
 *      node as the previous lookup, or some later node.
 * G.2) A "read traversal" that starts after the end of a prior "read
 *      traversal" (ordered by memory barriers) is guaranteed to see the
 *      same nodes as the previous traversal, or some later nodes.
 * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
 *      example, if a pair of reads to the same key run concurrently
 *      with an insertion of that same key, the reads remain unordered
 *      regardless of their return values. In other words, you cannot
 *      rely on the values returned by the reads to deduce ordering.
 *
 * Progress guarantees:
 *
 * * Reads are wait-free. These operations always move forward in the
 *   hash table linked list, and this list has no loop.
 * * Writes are lock-free. Any retry loop performed by a write operation
 *   is triggered by progress made within another update operation.
 *
 * Bucket node tables:
 *
 * hash table	hash table	the last	all bucket node tables
 * order	size		bucket node	0   1   2   3   4   5   6(index)
 *				table size
 * 0		1		1		1
 * 1		2		1		1   1
 * 2		4		2		1   1   2
 * 3		8		4		1   1   2   4
 * 4		16		8		1   1   2   4   8
 * 5		32		16		1   1   2   4   8  16
 * 6		64		32		1   1   2   4   8  16  32
 *
 * When growing/shrinking, we only focus on the last bucket node table
 * whose size is (!order ? 1 : (1 << (order - 1))).
 *
 * Example for growing/shrinking:
 * grow hash table from order 5 to 6: init the index=6 bucket node table
 * shrink hash table from order 6 to 5: fini the index=6 bucket node table
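 *
 * Worked instance of the formula above: at order 6, the last bucket
 * node table has size 1 << (6 - 1) = 32, which is exactly the number
 * of new buckets linked in when growing from order 5 to order 6.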
 *
 * A bit of ascii art explanation:
 *
 * The order index is the off-by-one compared to the actual power of 2
 * because we use index 0 to deal with the 0 special-case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 * 0  000        000
 * 4  100        001
 * 2  010        010
 * 6  110        011
 * 1  001        100
 * 5  101        101
 * 3  011        110
 * 7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order.
 *
 * order              bits       reverse
 * 0               0  000        000
 * 1               |  1  001        100             <-
 * 2               |  |  2  010        010    <-     |
 *                 |  |  |  3  011        110  | <-  |
 * 3               -> |  |  |  4  100        001  |  |
 *                    -> |  |     5  101        101  |
 *                       -> |         6  110        011
 *                          ->             7  111        111
 */
#define _LGPL_SOURCE
#define _GNU_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sched.h>
#include <unistd.h>
#include <poll.h>
#include <pthread.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu-flavor.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <rculfhash-internal.h>
/*
 * Split-counters lazily update the global counter every 1024
 * additions/removals. They automatically keep track of when a resize
 * is required. We use the bucket chain length as an indicator of the
 * need to expand for small tables and machines lacking per-cpu data
 * support.
 */
#define COUNT_COMMIT_ORDER		10
#define DEFAULT_SPLIT_COUNT_MASK	0xFUL
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3
/*
 * Define the minimum table size.
 */
#define MIN_TABLE_ORDER			0
#define MIN_TABLE_SIZE			(1UL << MIN_TABLE_ORDER)

/*
 * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
 */
#define MIN_PARTITION_PER_THREAD_ORDER	12
#define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)
/*
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no node must attach to the node scheduled for
 * removal, and that node garbage collection must be performed.
 * The bucket flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 * The "removal owner" flag is used to detect which of the "del"
 * operations that set the "removed flag" gets to return the removed
 * node to its caller. Note that the replace operation does not need to
 * interact with the "removal owner" flag, because it validates that
 * the "removed" flag is not set before performing its cmpxchg.
 */
#define REMOVED_FLAG		(1UL << 0)
#define BUCKET_FLAG		(1UL << 1)
#define REMOVAL_OWNER_FLAG	(1UL << 2)
#define FLAGS_MASK		((1UL << 3) - 1)

/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE		NULL
/*
 * ht_items_count: Split-counters counting the number of node addition
 * and removal in the table. Only used if the CDS_LFHT_ACCOUNTING flag
 * is set at hash table creation.
 *
 * These are free-running counters, never reset to zero. They count the
 * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
 * operations to update the global counter. We choose a power-of-2 value
 * for the trigger to deal with 32 or 64-bit overflow of the counter.
 */
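/*
 * For instance, with COUNT_COMMIT_ORDER == 10, a thread commits its
 * split-counter to the global counter whenever its free-running value
 * crosses a multiple of 1024, which is the test used in ht_count_add()
 * and ht_count_del() below:
 *
 *	(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)) == 0
 */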
struct ht_items_count {
	unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/*
 * rcu_resize_work: Contains arguments passed to RCU worker thread
 * responsible for performing lazy resize.
 */
struct rcu_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
};

/*
 * partition_resize_work: Contains arguments passed to worker threads
 * executing the hash table resize on partitions of the hash table
 * assigned to each processor's worker thread.
 */
struct partition_resize_work {
	pthread_t thread_id;
	struct cds_lfht *ht;
	unsigned long i, start, len;
	void (*fct)(struct cds_lfht *ht, unsigned long i,
		    unsigned long start, unsigned long len);
};
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
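/*
 * For reference, the macros above expand recursively: R2(0) yields
 * 0, 128, 64, 192, which are the bit-reversed values of the bytes
 * 0, 1, 2 and 3.
 */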
static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8)  << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}
static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
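/*
 * Example values: bit_reverse_u8(0x01) == 0x80 and
 * bit_reverse_u8(0xF0) == 0x0F. The wider variants apply the same
 * per-byte reversal and additionally swap the byte order.
 */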
/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif
#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;

	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif
static inline
unsigned int cds_lfht_fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}
/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int cds_lfht_get_count_order_u32(uint32_t x)
{
	if (!x)
		return -1;

	return fls_u32(x - 1);
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int cds_lfht_get_count_order_ulong(unsigned long x)
{
	if (!x)
		return -1;

	return cds_lfht_fls_ulong(x - 1);
}
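/*
 * Examples: cds_lfht_get_count_order_u32(1) == 0,
 * cds_lfht_get_count_order_u32(5) == 3 (since 5 <= 1 << 3), and
 * cds_lfht_get_count_order_u32(8) == 3.
 */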
static
void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count);
static long nr_cpus_mask = -1;
static long split_count_mask = -1;

#if defined(HAVE_SYSCONF)
static void ht_init_nr_cpus_mask(void)
{
	long maxcpus;

	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		nr_cpus_mask = -2;
		return;
	}
	/*
	 * round up number of CPUs to next power of two, so we
	 * can use & for modulo.
	 */
	maxcpus = 1UL << cds_lfht_get_count_order_ulong(maxcpus);
	nr_cpus_mask = maxcpus - 1;
}
#else /* #if defined(HAVE_SYSCONF) */
static void ht_init_nr_cpus_mask(void)
{
	nr_cpus_mask = -2;
}
#endif /* #else #if defined(HAVE_SYSCONF) */
static
void alloc_split_items_count(struct cds_lfht *ht)
{
	struct ht_items_count *count;

	if (nr_cpus_mask == -1) {
		ht_init_nr_cpus_mask();
		if (nr_cpus_mask < 0)
			split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
		else
			split_count_mask = nr_cpus_mask;
	}

	assert(split_count_mask >= 0);

	if (ht->flags & CDS_LFHT_ACCOUNTING) {
		ht->split_count = calloc(split_count_mask + 1, sizeof(*count));
		assert(ht->split_count);
	} else {
		ht->split_count = NULL;
	}
}

static
void free_split_items_count(struct cds_lfht *ht)
{
	poison_free(ht->split_count);
}
#if defined(HAVE_SCHED_GETCPU)
static
int ht_get_split_count_index(unsigned long hash)
{
	int cpu;

	assert(split_count_mask >= 0);
	cpu = sched_getcpu();
	if (caa_unlikely(cpu < 0))
		return hash & split_count_mask;
	else
		return cpu & split_count_mask;
}
#else /* #if defined(HAVE_SCHED_GETCPU) */
static
int ht_get_split_count_index(unsigned long hash)
{
	return hash & split_count_mask;
}
#endif /* #else #if defined(HAVE_SCHED_GETCPU) */
static
void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
	unsigned long split_count;
	int index;
	long count;

	if (caa_unlikely(!ht->split_count))
		return;
	index = ht_get_split_count_index(hash);
	split_count = uatomic_add_return(&ht->split_count[index].add, 1);
	if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		return;
	/* Only if number of add multiple of 1UL << COUNT_COMMIT_ORDER */

	dbg_printf("add split count %lu\n", split_count);
	count = uatomic_add_return(&ht->count,
			1UL << COUNT_COMMIT_ORDER);
	if (caa_likely(count & (count - 1)))
		return;
	/* Only if global count is power of 2 */

	if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
		return;
	dbg_printf("add set global %ld\n", count);
	cds_lfht_resize_lazy_count(ht, size,
		count >> (CHAIN_LEN_TARGET - 1));
}

static
void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
	unsigned long split_count;
	int index;
	long count;

	if (caa_unlikely(!ht->split_count))
		return;
	index = ht_get_split_count_index(hash);
	split_count = uatomic_add_return(&ht->split_count[index].del, 1);
	if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		return;
	/* Only if number of deletes multiple of 1UL << COUNT_COMMIT_ORDER */

	dbg_printf("del split count %lu\n", split_count);
	count = uatomic_add_return(&ht->count,
			-(1UL << COUNT_COMMIT_ORDER));
	if (caa_likely(count & (count - 1)))
		return;
	/* Only if global count is power of 2 */

	if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
		return;
	dbg_printf("del set global %ld\n", count);
	/*
	 * Don't shrink table if the number of nodes is below a
	 * certain threshold.
	 */
	if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
		return;
	cds_lfht_resize_lazy_count(ht, size,
		count >> (CHAIN_LEN_TARGET - 1));
}
static
void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
{
	unsigned long count;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = uatomic_read(&ht->count);
	/*
	 * Use bucket-local length for small table expand and for
	 * environments lacking per-cpu data support.
	 */
	if (count >= (1UL << COUNT_COMMIT_ORDER))
		return;
	if (chain_len > 100)
		dbg_printf("WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		cds_lfht_resize_lazy_grow(ht, size,
			cds_lfht_get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}
static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
int is_bucket(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & BUCKET_FLAG;
}

static
struct cds_lfht_node *flag_bucket(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
}

static
int is_removal_owner(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
}

static
struct cds_lfht_node *flag_removal_owner(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
}

static
struct cds_lfht_node *flag_removed_or_removal_owner(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) |
			REMOVED_FLAG | REMOVAL_OWNER_FLAG);
}

static
struct cds_lfht_node *get_end(void)
{
	return (struct cds_lfht_node *) END_VALUE;
}

static
int is_end(struct cds_lfht_node *node)
{
	return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
}
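/*
 * Summary of the pointer-tagging scheme implemented by the helpers
 * above: next pointers are sufficiently aligned that their three low
 * bits are free to carry REMOVED_FLAG (bit 0), BUCKET_FLAG (bit 1) and
 * REMOVAL_OWNER_FLAG (bit 2). For instance:
 *
 *	p = flag_bucket(node);		// sets bit 1
 *	assert(is_bucket(p));
 *	assert(clear_flag(p) == node);	// strips all flag bits
 */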
static
unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
		unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return old2;
}
static
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	return ht->mm->alloc_bucket_table(ht, order);
}

/*
 * cds_lfht_free_bucket_table() should be called with decreasing order.
 * When cds_lfht_free_bucket_table(0) is called, it means the whole
 * lfht is destroyed.
 */
static
void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	return ht->mm->free_bucket_table(ht, order);
}

static inline
struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
{
	return ht->bucket_at(ht, index);
}

static inline
struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
		unsigned long hash)
{
	return bucket_at(ht, hash & (size - 1));
}
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _cds_lfht_gc_bucket(struct cds_lfht_node *bucket, struct cds_lfht_node *node)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;

	assert(!is_bucket(bucket));
	assert(!is_removed(bucket));
	assert(!is_bucket(node));
	assert(!is_removed(node));
	for (;;) {
		iter_prev = bucket;
		/* We can always skip the bucket node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(!is_removed(iter));
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		/*
		 * We should never be called with bucket (start of chain)
		 * and logically removed node (end of path compression
		 * marker) being the actual same node. This would be a
		 * bug in the algorithm implementation.
		 */
		assert(bucket != node);
		for (;;) {
			if (caa_unlikely(is_end(iter)))
				return;
			if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
				return;
			next = rcu_dereference(clear_flag(iter)->next);
			if (caa_likely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_bucket(iter))
			new_next = flag_bucket(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
	}
}
static
int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *old_node,
		struct cds_lfht_node *old_next,
		struct cds_lfht_node *new_node)
{
	struct cds_lfht_node *bucket, *ret_next;

	if (!old_node)	/* Return -ENOENT if asked to replace NULL node */
		return -ENOENT;

	assert(!is_removed(old_node));
	assert(!is_bucket(old_node));
	assert(!is_removed(new_node));
	assert(!is_bucket(new_node));
	assert(new_node != old_node);
	for (;;) {
		/* Insert after node to be replaced */
		if (is_removed(old_next)) {
			/*
			 * Too late, the old node has been removed under us
			 * between lookup and replace. Fail.
			 */
			return -ENOENT;
		}
		assert(old_next == clear_flag(old_next));
		assert(new_node != old_next);
		/*
		 * REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
		 * flag. It is either set atomically at the same time
		 * (replace) or after (del).
		 */
		assert(!is_removal_owner(old_next));
		new_node->next = old_next;
		/*
		 * Here is the whole trick for lock-free replace: we add
		 * the replacement node _after_ the node we want to
		 * replace by atomically setting its next pointer at the
		 * same time we set its removal flag. Given that
		 * the lookups/get next use an iterator aware of the
		 * next pointer, they will either skip the old node due
		 * to the removal flag and see the new node, or use
		 * the old node, but will not see the new one.
		 * This is a replacement of a node with another node
		 * that has the same value: we are therefore not
		 * removing a value from the hash table. We set both the
		 * REMOVED and REMOVAL_OWNER flags atomically so we own
		 * the node after successful cmpxchg.
		 */
		ret_next = uatomic_cmpxchg(&old_node->next,
			old_next, flag_removed_or_removal_owner(new_node));
		if (ret_next == old_next)
			break;		/* We performed the replacement. */
		old_next = ret_next;
	}

	/*
	 * Ensure that the old node is not visible to readers anymore:
	 * lookup for the node, and remove it (along with any other
	 * logically removed node) if found.
	 */
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
	_cds_lfht_gc_bucket(bucket, new_node);

	assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
	return 0;
}
/*
 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
 * mode. A NULL unique_ret allows creation of duplicate keys.
 */
static
void _cds_lfht_add(struct cds_lfht *ht,
		unsigned long hash,
		cds_lfht_match_fct match,
		const void *key,
		unsigned long size,
		struct cds_lfht_node *node,
		struct cds_lfht_iter *unique_ret,
		int bucket_flag)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*return_node;
	struct cds_lfht_node *bucket;

	assert(!is_bucket(node));
	assert(!is_removed(node));
	bucket = lookup_bucket(ht, size, hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = bucket;
		/* We can always skip the bucket node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			if (caa_unlikely(is_end(iter)))
				goto insert;
			if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
				goto insert;

			/* bucket node is the first node of the identical-hash-value chain */
			if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
				goto insert;

			next = rcu_dereference(clear_flag(iter)->next);
			if (caa_unlikely(is_removed(next)))
				goto gc_node;

			/* uniquely add */
			if (unique_ret
			    && !is_bucket(next)
			    && clear_flag(iter)->reverse_hash == node->reverse_hash) {
				struct cds_lfht_iter d_iter = { .node = node, .next = iter, };

				/*
				 * uniquely adding inserts the node as the first
				 * node of the identical-hash-value node chain.
				 *
				 * This semantic ensures no duplicated keys
				 * should ever be observable in the table
				 * (including traversing the table node by
				 * node by forward iterations)
				 */
				cds_lfht_next_duplicate(ht, match, key, &d_iter);
				if (!d_iter.node)
					goto insert;

				*unique_ret = d_iter;
				return;
			}

			/* Only account for identical reverse hash once */
			if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash
			    && !is_bucket(next))
				check_resize(ht, size, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}

	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removed(iter));
		assert(iter_prev != node);
		if (!bucket_flag)
			node->next = clear_flag(iter);
		else
			node->next = flag_bucket(clear_flag(iter));
		if (is_bucket(iter))
			new_node = flag_bucket(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->next, iter,
				    new_node) != iter) {
			continue;	/* retry */
		} else {
			return_node = node;
			goto end;
		}

	gc_node:
		assert(!is_removed(iter));
		if (is_bucket(iter))
			new_next = flag_bucket(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
		/* retry */
	}
end:
	if (unique_ret) {
		unique_ret->node = return_node;
		/* unique_ret->next left unset, never used. */
	}
}
static
int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *node)
{
	struct cds_lfht_node *bucket, *next;

	if (!node)	/* Return -ENOENT if asked to delete NULL node */
		return -ENOENT;

	/* logically delete the node */
	assert(!is_bucket(node));
	assert(!is_removed(node));
	assert(!is_removal_owner(node));

	/*
	 * We are first checking if the node had previously been
	 * logically removed (this check is not atomic with setting the
	 * logical removal flag). Return -ENOENT if the node had
	 * previously been removed.
	 */
	next = CMM_LOAD_SHARED(node->next);	/* next is not dereferenced */
	if (caa_unlikely(is_removed(next)))
		return -ENOENT;
	assert(!is_bucket(next));
	/*
	 * The del operation semantic guarantees a full memory barrier
	 * before the uatomic_or atomic commit of the deletion flag.
	 */
	cmm_smp_mb__before_uatomic_or();
	/*
	 * We set the REMOVED_FLAG unconditionally. Note that there may
	 * be more than one concurrent thread setting this flag.
	 * Knowing which wins the race will be known after the garbage
	 * collection phase, stay tuned!
	 */
	uatomic_or(&node->next, REMOVED_FLAG);
	/* We performed the (logical) deletion. */

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
	_cds_lfht_gc_bucket(bucket, node);

	assert(is_removed(CMM_LOAD_SHARED(node->next)));
	/*
	 * Last phase: atomically exchange node->next with a version
	 * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
	 * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
	 * the node and win the removal race.
	 * It is interesting to note that all "add" paths are forbidden
	 * to change the next pointer starting from the point where the
	 * REMOVED_FLAG is set, so here using a read, followed by an
	 * xchg() suffices to guarantee that the xchg() will only ever
	 * set the "REMOVAL_OWNER_FLAG" (or change nothing if the flag
	 * was already set).
	 */
	if (!is_removal_owner(uatomic_xchg(&node->next,
			flag_removal_owner(node->next))))
		return 0;
	else
		return -ENOENT;
}
static
void *partition_resize_thread(void *arg)
{
	struct partition_resize_work *work = arg;

	work->ht->flavor->register_thread();
	work->fct(work->ht, work->i, work->start, work->len);
	work->ht->flavor->unregister_thread();
	return NULL;
}
static
void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
		unsigned long len,
		void (*fct)(struct cds_lfht *ht, unsigned long i,
			unsigned long start, unsigned long len))
{
	unsigned long partition_len;
	struct partition_resize_work *work;
	int ret;
	unsigned long thread;
	unsigned long nr_threads;

	/*
	 * Note: nr_cpus_mask + 1 is always power of 2.
	 * We spawn just the number of threads we need to satisfy the minimum
	 * partition size, up to the number of CPUs in the system.
	 */
	if (nr_cpus_mask > 0) {
		nr_threads = min(nr_cpus_mask + 1,
				 len >> MIN_PARTITION_PER_THREAD_ORDER);
	} else {
		nr_threads = 1;
	}
	partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
	work = calloc(nr_threads, sizeof(*work));
	assert(work);
	for (thread = 0; thread < nr_threads; thread++) {
		work[thread].ht = ht;
		work[thread].i = i;
		work[thread].len = partition_len;
		work[thread].start = thread * partition_len;
		work[thread].fct = fct;
		ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
			partition_resize_thread, &work[thread]);
		assert(!ret);
	}
	for (thread = 0; thread < nr_threads; thread++) {
		ret = pthread_join(work[thread].thread_id, NULL);
		assert(!ret);
	}
	free(work);
}
/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 *
 * When we reach a certain length, we can split this population phase over
 * many worker threads, based on the number of CPUs available in the system.
 * This should therefore take care of not having the expand lagging behind too
 * many concurrent insertion threads by using the scheduler's ability to
 * schedule bucket node population fairly with insertions.
 */
static
void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
				   unsigned long start, unsigned long len)
{
	unsigned long j, size = 1UL << (i - 1);

	assert(i > MIN_TABLE_ORDER);
	ht->flavor->read_lock();
	for (j = size + start; j < size + start + len; j++) {
		struct cds_lfht_node *new_node = bucket_at(ht, j);

		assert(j >= size && j < (size << 1));
		dbg_printf("init populate: order %lu index %lu hash %lu\n",
			   i, j, j);
		new_node->reverse_hash = bit_reverse_ulong(j);
		_cds_lfht_add(ht, j, NULL, NULL, size, new_node, NULL, 1);
	}
	ht->flavor->read_unlock();
}

static
void init_table_populate(struct cds_lfht *ht, unsigned long i,
			 unsigned long len)
{
	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->flavor->thread_online();
		init_table_populate_partition(ht, i, 0, len);
		ht->flavor->thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, init_table_populate_partition);
}
static
void init_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long last_order)
{
	unsigned long i;

	dbg_printf("init table: first_order %lu last_order %lu\n",
		   first_order, last_order);
	assert(first_order > MIN_TABLE_ORDER);
	for (i = first_order; i <= last_order; i++) {
		unsigned long len;

		len = 1UL << (i - 1);
		dbg_printf("init order %lu len: %lu\n", i, len);

		/* Stop expand if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->resize_target) < (1UL << i))
			break;

		cds_lfht_alloc_bucket_table(ht, i);

		/*
		 * Set all bucket nodes reverse hash values for a level and
		 * link all bucket nodes into the table.
		 */
		init_table_populate(ht, i, len);

		/*
		 * Update table size.
		 */
		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->size, 1UL << i);

		dbg_printf("init new size: %lu\n", 1UL << i);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
}
/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 * For a single level, we logically remove and garbage collect each node.
 *
 * As a design choice, we perform logical removal and garbage collection on a
 * node-per-node basis to simplify this algorithm. We also assume keeping good
 * cache locality of the operation would outweigh possible performance gain
 * that could be achieved by batching garbage collection for multiple levels.
 * However, this would have to be justified by benchmarks.
 *
 * Concurrent removal and add operations are helping us perform garbage
 * collection of logically removed nodes. We guarantee that all logically
 * removed nodes have been garbage-collected (unlinked) before call_rcu is
 * invoked to free a whole level of bucket nodes (after a grace period).
 *
 * Logical removal and garbage collection can therefore be done in batch
 * or on a node-per-node basis, as long as the guarantee above holds.
 *
 * When we reach a certain length, we can split this removal over many worker
 * threads, based on the number of CPUs available in the system. This should
 * take care of not letting the resize process lag behind too many concurrent
 * updater threads actively inserting into the hash table.
 */
static
void remove_table_partition(struct cds_lfht *ht, unsigned long i,
			    unsigned long start, unsigned long len)
{
	unsigned long j, size = 1UL << (i - 1);

	assert(i > MIN_TABLE_ORDER);
	ht->flavor->read_lock();
	for (j = size + start; j < size + start + len; j++) {
		struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
		struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);

		assert(j >= size && j < (size << 1));
		dbg_printf("remove entry: order %lu index %lu hash %lu\n",
			   i, j, j);
		/* Set the REMOVED_FLAG to freeze the ->next for gc */
		uatomic_or(&fini_bucket->next, REMOVED_FLAG);
		_cds_lfht_gc_bucket(parent_bucket, fini_bucket);
	}
	ht->flavor->read_unlock();
}
static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
{
	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->flavor->thread_online();
		remove_table_partition(ht, i, 0, len);
		ht->flavor->thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, remove_table_partition);
}
/*
 * fini_table() is never called for first_order == 0, which is why
 * free_by_rcu_order == 0 can be used as criterion to know if free must
 * be called.
 */
static
void fini_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long last_order)
{
	long i;
	unsigned long free_by_rcu_order = 0;

	dbg_printf("fini table: first_order %lu last_order %lu\n",
		   first_order, last_order);
	assert(first_order > MIN_TABLE_ORDER);
	for (i = last_order; i >= (long) first_order; i--) {
		unsigned long len;

		len = 1UL << (i - 1);
		dbg_printf("fini order %lu len: %lu\n", i, len);

		/* Stop shrink if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->resize_target) > (1UL << (i - 1)))
			break;

		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->size, 1UL << (i - 1));

		/*
		 * We need to wait for all add operations to reach Q.S. (and
		 * thus use the new table for lookups) before we can start
		 * releasing the old bucket nodes. Otherwise their lookup will
		 * return a logically removed node as insert position.
		 */
		ht->flavor->update_synchronize_rcu();
		if (free_by_rcu_order)
			cds_lfht_free_bucket_table(ht, free_by_rcu_order);

		/*
		 * Set "removed" flag in bucket nodes about to be removed.
		 * Unlink all now-logically-removed bucket node pointers.
		 * Concurrent add/remove operations are helping us doing
		 * the gc.
		 */
		remove_table(ht, i, len);

		free_by_rcu_order = i;

		dbg_printf("fini new size: %lu\n", 1UL << i);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}

	if (free_by_rcu_order) {
		ht->flavor->update_synchronize_rcu();
		cds_lfht_free_bucket_table(ht, free_by_rcu_order);
	}
}
static
void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
{
	struct cds_lfht_node *prev, *node;
	unsigned long order, len, i;

	cds_lfht_alloc_bucket_table(ht, 0);

	dbg_printf("create bucket: order 0 index 0 hash 0\n");
	node = bucket_at(ht, 0);
	node->next = flag_bucket(get_end());
	node->reverse_hash = 0;

	for (order = 1; order < cds_lfht_get_count_order_ulong(size) + 1; order++) {
		len = 1UL << (order - 1);
		cds_lfht_alloc_bucket_table(ht, order);

		for (i = 0; i < len; i++) {
			/*
			 * Now, we are trying to init the node with the
			 * hash=(len+i) (which is also a bucket with the
			 * index=(len+i)) and insert it into the hash table,
			 * so this node has to be inserted after the bucket
			 * with the index=(len+i)&(len-1)=i. Because no other
			 * non-bucket node, nor any bucket node with a larger
			 * index/hash, has been inserted yet, the bucket node
			 * being inserted is linked directly after the bucket
			 * node with index=i.
			 */
			prev = bucket_at(ht, i);
			node = bucket_at(ht, len + i);

			dbg_printf("create bucket: order %lu index %lu hash %lu\n",
				   order, len + i, len + i);
			node->reverse_hash = bit_reverse_ulong(len + i);

			/* insert after prev */
			assert(is_bucket(prev->next));
			node->next = prev->next;
			prev->next = flag_bucket(node);
		}
	}
}
struct cds_lfht *_cds_lfht_new(unsigned long init_size,
			unsigned long min_nr_alloc_buckets,
			unsigned long max_nr_buckets,
			int flags,
			const struct cds_lfht_mm_type *mm,
			const struct rcu_flavor_struct *flavor,
			pthread_attr_t *attr)
{
	struct cds_lfht *ht;
	unsigned long order;

	/* min_nr_alloc_buckets must be power of two */
	if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
		return NULL;

	/* init_size must be power of two */
	if (!init_size || (init_size & (init_size - 1)))
		return NULL;

	/*
	 * Memory management plugin default.
	 */
	if (!mm) {
		if (CAA_BITS_PER_LONG > 32
				&& max_nr_buckets
				&& max_nr_buckets <= (1ULL << 32)) {
			/*
			 * For 64-bit architectures, with max number of
			 * buckets small enough not to use the entire
			 * 64-bit memory mapping space (and allowing a
			 * fair number of hash table instances), use the
			 * mmap allocator, which is faster than the
			 * order allocator.
			 */
			mm = &cds_lfht_mm_mmap;
		} else {
			/*
			 * The fallback is to use the order allocator.
			 */
			mm = &cds_lfht_mm_order;
		}
	}

	/* max_nr_buckets == 0 for order based mm means infinite */
	if (mm == &cds_lfht_mm_order && !max_nr_buckets)
		max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);

	/* max_nr_buckets must be power of two */
	if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
		return NULL;

	min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
	init_size = max(init_size, MIN_TABLE_SIZE);
	max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
	init_size = min(init_size, max_nr_buckets);

	ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets);
	assert(ht);
	assert(ht->mm == mm);
	assert(ht->bucket_at == mm->bucket_at);

	ht->flags = flags;
	ht->flavor = flavor;
	ht->resize_attr = attr;
	alloc_split_items_count(ht);
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = cds_lfht_get_count_order_ulong(init_size);
	ht->resize_target = 1UL << order;
	cds_lfht_create_bucket(ht, 1UL << order);
	ht->size = 1UL << order;
	return ht;
}
void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
		cds_lfht_match_fct match, const void *key,
		struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next, *bucket;
	unsigned long reverse_hash, size;

	reverse_hash = bit_reverse_ulong(hash);

	size = rcu_dereference(ht->size);
	bucket = lookup_bucket(ht, size, hash);
	/* We can always skip the bucket node initially */
	node = rcu_dereference(bucket->next);
	node = clear_flag(node);
	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (caa_unlikely(node->reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->next);
		assert(node == clear_flag(node));
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)
		    && node->reverse_hash == reverse_hash
		    && caa_likely(match(node, key))) {
				break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
	iter->node = node;
	iter->next = next;
}
void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
		const void *key, struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next;
	unsigned long reverse_hash;

	node = iter->node;
	reverse_hash = node->reverse_hash;
	next = iter->next;
	node = clear_flag(next);

	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (caa_unlikely(node->reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->next);
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)
		    && caa_likely(match(node, key))) {
				break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
	iter->node = node;
	iter->next = next;
}

void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next;

	node = clear_flag(iter->next);
	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->next);
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)) {
				break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
	iter->node = node;
	iter->next = next;
}
void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	/*
	 * Get next after first bucket node. The first bucket node is the
	 * first node of the linked list.
	 */
	iter->next = bucket_at(ht, 0)->next;
	cds_lfht_next(ht, iter);
}
void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
		struct cds_lfht_node *node)
{
	unsigned long size;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = rcu_dereference(ht->size);
	_cds_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
	ht_count_add(ht, size, hash);
}
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
				unsigned long hash,
				cds_lfht_match_fct match,
				const void *key,
				struct cds_lfht_node *node)
{
	unsigned long size;
	struct cds_lfht_iter iter;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = rcu_dereference(ht->size);
	_cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
	if (iter.node == node)
		ht_count_add(ht, size, hash);
	return iter.node;
}
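/*
 * Typical use of the return value above (a sketch; a `mynode'
 * structure embedding a cds_lfht_node is an application-side
 * assumption): a return equal to the node passed as parameter means
 * the add succeeded ("write"); any other return is the pre-existing
 * node ("read", add failure):
 *
 *	ret = cds_lfht_add_unique(ht, hash, match, key,
 *			&mynode->lfht_node);
 *	if (ret != &mynode->lfht_node)
 *		free(mynode);	// key already present: node not added
 */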
struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
				unsigned long hash,
				cds_lfht_match_fct match,
				const void *key,
				struct cds_lfht_node *node)
{
	unsigned long size;
	struct cds_lfht_iter iter;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = rcu_dereference(ht->size);
	for (;;) {
		_cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
		if (iter.node == node) {
			ht_count_add(ht, size, hash);
			return NULL;
		}

		if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node))
			return iter.node;
	}
}

int cds_lfht_replace(struct cds_lfht *ht,
		struct cds_lfht_iter *old_iter,
		unsigned long hash,
		cds_lfht_match_fct match,
		const void *key,
		struct cds_lfht_node *new_node)
{
	unsigned long size;

	new_node->reverse_hash = bit_reverse_ulong(hash);
	if (!old_iter->node)
		return -ENOENT;
	if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
		return -EINVAL;
	if (caa_unlikely(!match(old_iter->node, key)))
		return -EINVAL;
	size = rcu_dereference(ht->size);
	return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
			new_node);
}
int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	unsigned long size, hash;
	int ret;

	size = rcu_dereference(ht->size);
	ret = _cds_lfht_del(ht, size, node);
	if (!ret) {
		hash = bit_reverse_ulong(node->reverse_hash);
		ht_count_del(ht, size, hash);
	}
	return ret;
}
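/*
 * Deletion usage sketch: cds_lfht_del() must be called from within a
 * read-side critical section, and the node memory may only be
 * reclaimed after a grace period. The `mynode' layout and the
 * free_node_rcu callback are application-side assumptions:
 *
 *	rcu_read_lock();
 *	ret = cds_lfht_del(ht, &mynode->lfht_node);
 *	rcu_read_unlock();
 *	if (!ret)
 *		call_rcu(&mynode->rcu_head, free_node_rcu);
 */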
int cds_lfht_is_node_deleted(struct cds_lfht_node *node)
{
	return is_removed(CMM_LOAD_SHARED(node->next));
}

static
int cds_lfht_delete_bucket(struct cds_lfht *ht)
{
	struct cds_lfht_node *node;
	unsigned long order, i, size;

	/* Check that the table is empty */
	node = bucket_at(ht, 0);
	do {
		node = clear_flag(node)->next;
		if (!is_bucket(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (!is_end(node));
	/*
	 * size accessed without rcu_dereference because hash table is
	 * being destroyed.
	 */
	size = ht->size;
	/* Internal sanity check: all nodes left should be buckets */
	for (i = 0; i < size; i++) {
		node = bucket_at(ht, i);
		dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
			   i, i, bit_reverse_ulong(node->reverse_hash));
		assert(is_bucket(node->next));
	}

	for (order = cds_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
		cds_lfht_free_bucket_table(ht, order);

	return 0;
}
/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	cmm_smp_mb();	/* Store destroy before load resize */
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = cds_lfht_delete_bucket(ht);
	if (ret)
		return ret;
	free_split_items_count(ht);
	if (attr)
		*attr = ht->resize_attr;
	poison_free(ht);
	return ret;
}
void cds_lfht_count_nodes(struct cds_lfht *ht,
		long *approx_before,
		unsigned long *count,
		long *approx_after)
{
	struct cds_lfht_node *node, *next;
	unsigned long nr_bucket = 0, nr_removed = 0;

	*approx_before = 0;
	if (ht->split_count) {
		int i;

		for (i = 0; i < split_count_mask + 1; i++) {
			*approx_before += uatomic_read(&ht->split_count[i].add);
			*approx_before -= uatomic_read(&ht->split_count[i].del);
		}
	}

	*count = 0;

	/* Count non-bucket nodes in the table */
	node = bucket_at(ht, 0);
	do {
		next = rcu_dereference(node->next);
		if (is_removed(next)) {
			if (!is_bucket(next))
				(nr_removed)++;
			else
				(nr_bucket)++;
		} else if (!is_bucket(next))
			(*count)++;
		else
			(nr_bucket)++;
		node = clear_flag(next);
	} while (!is_end(node));
	dbg_printf("number of logically removed nodes: %lu\n", nr_removed);
	dbg_printf("number of bucket nodes: %lu\n", nr_bucket);
	*approx_after = 0;
	if (ht->split_count) {
		int i;

		for (i = 0; i < split_count_mask + 1; i++) {
			*approx_after += uatomic_read(&ht->split_count[i].add);
			*approx_after -= uatomic_read(&ht->split_count[i].del);
		}
	}
}
/* called with resize mutex held */
static
void _do_cds_lfht_grow(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	old_order = cds_lfht_get_count_order_ulong(old_size);
	new_order = cds_lfht_get_count_order_ulong(new_size);
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size > old_size);
	init_table(ht, old_order + 1, new_order);
}

/* called with resize mutex held */
static
void _do_cds_lfht_shrink(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	new_size = max(new_size, MIN_TABLE_SIZE);
	old_order = cds_lfht_get_count_order_ulong(old_size);
	new_order = cds_lfht_get_count_order_ulong(new_size);
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size < old_size);

	/* Remove and unlink all bucket nodes to remove. */
	fini_table(ht, new_order + 1, old_order);
}
/* called with resize mutex held */
static
void _do_cds_lfht_resize(struct cds_lfht *ht)
{
	unsigned long new_size, old_size;

	/*
	 * Resize table, re-do if the target size has changed under us.
	 */
	do {
		assert(uatomic_read(&ht->in_progress_resize));
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
		ht->resize_initiated = 1;
		old_size = ht->size;
		new_size = CMM_LOAD_SHARED(ht->resize_target);
		if (old_size < new_size)
			_do_cds_lfht_grow(ht, old_size, new_size);
		else if (old_size > new_size)
			_do_cds_lfht_shrink(ht, old_size, new_size);
		ht->resize_initiated = 0;
		/* write resize_initiated before read resize_target */
		cmm_smp_mb();
	} while (ht->size != CMM_LOAD_SHARED(ht->resize_target));
}
static
unsigned long resize_target_grow(struct cds_lfht *ht, unsigned long new_size)
{
	return _uatomic_xchg_monotonic_increase(&ht->resize_target, new_size);
}

static
void resize_target_update_count(struct cds_lfht *ht,
				unsigned long count)
{
	count = max(count, MIN_TABLE_SIZE);
	count = min(count, ht->max_nr_buckets);
	uatomic_set(&ht->resize_target, count);
}

void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
	resize_target_update_count(ht, new_size);
	CMM_STORE_SHARED(ht->resize_initiated, 1);
	ht->flavor->thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->flavor->thread_online();
}
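/*
 * Example explicit resize request (a sketch): since cds_lfht_resize()
 * takes the resize mutex and puts the calling thread offline, it must
 * not be invoked from within a read-side critical section:
 *
 *	cds_lfht_resize(ht, 4096);	// request 4096 buckets
 */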
static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct cds_lfht *ht = work->ht;

	ht->flavor->thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->flavor->thread_online();
	poison_free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}

static
void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
{
	struct rcu_resize_work *work;

	/* Store resize_target before read resize_initiated */
	cmm_smp_mb();
	if (!CMM_LOAD_SHARED(ht->resize_initiated)) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before load destroy */
		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
			uatomic_dec(&ht->in_progress_resize);
			return;
		}
		work = malloc(sizeof(*work));
		if (work == NULL) {
			dbg_printf("error allocating resize work, bailing out\n");
			uatomic_dec(&ht->in_progress_resize);
			return;
		}
		work->ht = ht;
		ht->flavor->update_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(ht->resize_initiated, 1);
	}
}
static
void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
{
	unsigned long target_size = size << growth;

	target_size = min(target_size, ht->max_nr_buckets);
	if (resize_target_grow(ht, target_size) >= target_size)
		return;

	__cds_lfht_resize_lazy_launch(ht);
}

/*
 * We favor grow operations over shrink. A shrink operation never occurs
 * if a grow operation is queued for lazy execution. A grow operation
 * cancels any pending shrink lazy execution.
 */
static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count)
{
	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = max(count, MIN_TABLE_SIZE);
	count = min(count, ht->max_nr_buckets);
	if (count == size)
		return;		/* Already the right size, no resize needed */
	if (count > size) {	/* lazy grow */
		if (resize_target_grow(ht, count) >= count)
			return;
	} else {		/* lazy shrink */
		for (;;) {
			unsigned long s;

			s = uatomic_cmpxchg(&ht->resize_target, size, count);
			if (s == size)
				break;	/* our shrink target was set */
			else if (s > size)
				return;	/* growing is/(was just) in progress */
			else if (s <= count)
				return;	/* some other thread does the shrink */
			size = s;
		}
	}
	__cds_lfht_resize_lazy_launch(ht);
}