/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical section allows readers to perform hash
 *   table lookups and use the returned objects safely by delaying
 *   memory reclaim for a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within an RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures that no
 *   duplicate key exists.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of bucket nodes is kept. These bucket nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation for small tables only allows expanding the hash table.
 *   It is triggered automatically by detecting long chains in the add
 *   operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - Resize operation initiated by long chain detection is executed by a
 *   call_rcu thread, which keeps lock-freedom of add and remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. After setting the "removed" flag,
 *   only the thread that is first to set the "removal owner" flag
 *   (with an xchg) in a node's next pointer is considered to have
 *   succeeded its removal (and thus owns the node to reclaim).
 *   Because we garbage-collect starting from an invariant node (the
 *   start-of-bucket bucket node) up to the "removed" node (or find a
 *   reverse-hash that is higher), we are sure that a successful
 *   traversal of the chain leads to a chain that is present in the
 *   linked-list (the start node is never removed) and that it does not
 *   contain the "removed" node anymore, even if concurrent delete/add
 *   operations are changing the structure of the list concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with removed flag set in the bucket where it wants
 *   to add its new node. This ensures lock-freedom of the add operation
 *   by helping the remover unlink nodes from the list rather than
 *   waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "bucket node" tables.
 * - There is one bucket node table per hash index order. The size of
 *   each bucket node table is half the number of hashes contained in
 *   this order (except for order 0).
 * - synchronize_rcu is used to garbage-collect the old bucket node table.
 * - The per-order bucket node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
 *
 * Ordering Guarantees:
 *
 * To discuss these guarantees, we first define "read" operation as any
 * of the basic cds_lfht_lookup, cds_lfht_next_duplicate,
 * cds_lfht_first, cds_lfht_next operations, as well as
 * cds_lfht_add_unique (failure).
 *
 * We define "read traversal" operation as any of the following
 * group of operations
 * - cds_lfht_lookup followed by iteration with cds_lfht_next_duplicate
 *   (and/or cds_lfht_next, although less common).
 * - cds_lfht_add_unique (failure) followed by iteration with
 *   cds_lfht_next_duplicate (and/or cds_lfht_next, although less
 *   common).
 * - cds_lfht_first followed by iteration with cds_lfht_next (and/or
 *   cds_lfht_next_duplicate, although less common).
 *
 * We define "write" operations as any of cds_lfht_add,
 * cds_lfht_add_unique (success), cds_lfht_add_replace, cds_lfht_del.
 *
 * When cds_lfht_add_unique succeeds (returns the node passed as
 * parameter), it acts as a "write" operation. When cds_lfht_add_unique
 * fails (returns a node different from the one passed as parameter), it
 * acts as a "read" operation. A cds_lfht_add_unique failure is a
 * cds_lfht_lookup "read" operation; therefore, any ordering guarantee
 * referring to "lookup" applies equally to "lookup" and to
 * cds_lfht_add_unique (failure).
 *
 * We define "prior" and "later" node as nodes observable by reads and
 * read traversals respectively before and after a write or sequence of
 * write operations.
 *
 * Hash-table operations are often cascaded, for example, the pointer
 * returned by a cds_lfht_lookup() might be passed to a cds_lfht_next(),
 * whose return value might in turn be passed to another hash-table
 * operation. This entire cascaded series of operations must be enclosed
 * by a pair of matching rcu_read_lock() and rcu_read_unlock()
 * operations.
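 *
 * For instance, a lookup cascaded into iteration over duplicate keys
 * could be enclosed as follows (a minimal sketch; "my_match" and
 * "my_key" stand for a user-provided match function and key):
 *
 *	rcu_read_lock();
 *	cds_lfht_lookup(ht, hash, my_match, my_key, &iter);
 *	while (cds_lfht_iter_get_node(&iter) != NULL) {
 *		(use the returned node here)
 *		cds_lfht_next_duplicate(ht, my_match, my_key, &iter);
 *	}
 *	rcu_read_unlock();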
 *
 * The following ordering guarantees are offered by this hash table:
 *
 * A.1) "read" after "write": if there is ordering between a write and a
 *      later read, then the read is guaranteed to see the write or some
 *      later write.
 * A.2) "read traversal" after "write": given that there is dependency
 *      ordering between reads in a "read traversal", if there is
 *      ordering between a write and the first read of the traversal,
 *      then the "read traversal" is guaranteed to see the write or
 *      some later write.
 * B.1) "write" after "read": if there is ordering between a read and a
 *      later write, then the read will never see the write.
 * B.2) "write" after "read traversal": given that there is dependency
 *      ordering between reads in a "read traversal", if there is
 *      ordering between the last read of the traversal and a later
 *      write, then the "read traversal" will never see the write.
 * C)   "write" while "read traversal": if a write occurs during a "read
 *      traversal", the traversal may, or may not, see the write.
 * D.1) "write" after "write": if there is ordering between a write and
 *      a later write, then the later write is guaranteed to see the
 *      effects of the first write.
 * D.2) Concurrent "write" pairs: The system will assign an arbitrary
 *      order to any pair of concurrent conflicting writes.
 *      Non-conflicting writes (for example, to different keys) are
 *      unordered.
 * E)   If a grace period separates a "del" or "replace" operation
 *      and a subsequent operation, then that subsequent operation is
 *      guaranteed not to see the removed item.
 * F)   Uniqueness guarantee: given a hash table that does not contain
 *      duplicate items for a given key, there will only be one item in
 *      the hash table after an arbitrary sequence of add_unique and/or
 *      add_replace operations. Note, however, that a pair of
 *      concurrent read operations might well access two different items
 *      with that key.
 * G.1) If a pair of lookups for a given key are ordered (e.g. by a
 *      memory barrier), then the second lookup will return the same
 *      node as the previous lookup, or some later node.
 * G.2) A "read traversal" that starts after the end of a prior "read
 *      traversal" (ordered by memory barriers) is guaranteed to see the
 *      same nodes as the previous traversal, or some later nodes.
 * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
 *      example, if a pair of reads to the same key run concurrently
 *      with an insertion of that same key, the reads remain unordered
 *      regardless of their return values. In other words, you cannot
 *      rely on the values returned by the reads to deduce ordering.
 *
 * Progress guarantees:
 *
 * * Reads are wait-free. These operations always move forward in the
 *   hash table linked list, and this list has no loop.
 * * Writes are lock-free. Any retry loop performed by a write operation
 *   is triggered by progress made within another update operation.
 *
 * Bucket node tables:
 *
 * hash table   hash table      the last        all bucket node tables
 * order        size            bucket node     0   1   2   3   4   5   6(index)
 *                              table size
 * 0            1               1               1
 * 1            2               1               1   1
 * 2            4               2               1   1   2
 * 3            8               4               1   1   2   4
 * 4            16              8               1   1   2   4   8
 * 5            32              16              1   1   2   4   8  16
 * 6            64              32              1   1   2   4   8  16  32
 *
 * When growing/shrinking, we only focus on the last bucket node table,
 * whose size is (!order ? 1 : (1 << (order - 1))).
 *
 * Example for growing/shrinking:
 * grow hash table from order 5 to 6: init the index=6 bucket node table
 * shrink hash table from order 6 to 5: fini the index=6 bucket node table
 *
 * A bit of ascii art explanation:
 *
 * The order index is off by one compared to the actual power of 2,
 * because we use index 0 to deal with the 0 special case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 * 0  000        000
 * 4  100        001
 * 2  010        010
 * 6  110        011
 * 1  001        100
 * 5  101        101
 * 3  011        110
 * 7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order.
 *
 * order              bits       reverse
 * 0               0  000        000
 * 1               |  1  001        100             <-
 * 2               |  |  2  010        010    <-     |
 *                 |  |  |  3  011        110  |     <- |
 * 3               -> |  |  |  4  100        001  |        |
 *                    -> |  |     5  101        101  |
 *                       -> |        6  110        011
 *                          ->          7  111        111
 */

#define _LGPL_SOURCE
#define _GNU_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sched.h>

#include "config.h"
#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu-flavor.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <rculfhash-internal.h>
#include <pthread.h>

/*
 * Split-counters lazily update the global counter every 1024
 * additions/removals. They automatically keep track of when a resize is
 * required. We use the bucket length as the expansion indicator for
 * small tables and for machines lacking per-cpu data support.
 */
#define COUNT_COMMIT_ORDER		10
#define DEFAULT_SPLIT_COUNT_MASK	0xFUL
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3

/*
 * Define the minimum table size.
 */
#define MIN_TABLE_ORDER			0
#define MIN_TABLE_SIZE			(1UL << MIN_TABLE_ORDER)

/*
 * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
 */
#define MIN_PARTITION_PER_THREAD_ORDER	12
#define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)

/*
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no node must attach to the node scheduled for
 * removal, and that node garbage collection must be performed.
 * The bucket flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define BUCKET_FLAG		(1UL << 1)
#define REMOVAL_OWNER_FLAG	(1UL << 2)
#define FLAGS_MASK		((1UL << 3) - 1)
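
/*
 * These flags are stored in the low bits of the next pointers. This is
 * possible because struct cds_lfht_node is sufficiently aligned that
 * node addresses always have their low 3 bits clear. As an
 * illustration (addresses are made up): a next pointer to a node at
 * 0x1000 with REMOVED_FLAG and BUCKET_FLAG set is stored as 0x1003;
 * clear_flag() below recovers 0x1000 by masking with ~FLAGS_MASK.
 */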

/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE		NULL

/*
 * ht_items_count: Split-counters counting the number of node additions
 * and removals in the table. Only used if the CDS_LFHT_ACCOUNTING flag
 * is set at hash table creation.
 *
 * These are free-running counters, never reset to zero. They count the
 * number of add/remove operations, and trigger an update of the global
 * counter every (1 << COUNT_COMMIT_ORDER) operations. We choose a
 * power-of-2 value for the trigger to deal with 32- or 64-bit overflow
 * of the counter.
 */
struct ht_items_count {
	unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/*
 * rcu_resize_work: Contains arguments passed to the RCU worker thread
 * responsible for performing lazy resize.
 */
struct rcu_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
};

/*
 * partition_resize_work: Contains arguments passed to worker threads
 * executing the hash table resize on partitions of the hash table
 * assigned to each processor's worker thread.
 */
struct partition_resize_work {
	pthread_t thread_id;
	struct cds_lfht *ht;
	unsigned long i, start, len;
	void (*fct)(struct cds_lfht *ht, unsigned long i,
		unsigned long start, unsigned long len);
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally public domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6
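
/*
 * For example, BitReverseTable256[0x01] == 0x80 and
 * BitReverseTable256[0x2C] == 0x34: indexing the table reverses the 8
 * bits of the index. The helpers below reverse a full 32-bit or 64-bit
 * word by reversing each byte through this table and swapping the byte
 * order.
 */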

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}

/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

unsigned int cds_lfht_fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int cds_lfht_get_count_order_u32(uint32_t x)
{
	if (!x)
		return -1;

	return fls_u32(x - 1);
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int cds_lfht_get_count_order_ulong(unsigned long x)
{
	if (!x)
		return -1;

	return cds_lfht_fls_ulong(x - 1);
}
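
/*
 * For example, cds_lfht_get_count_order_ulong(5) == 3, since
 * 5 <= (1UL << 3), while cds_lfht_get_count_order_ulong(4) == 2: the
 * result is the smallest order for which x <= (1UL << order).
 */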

static
void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count);

static long nr_cpus_mask = -1;
static long split_count_mask = -1;

#if defined(HAVE_SYSCONF)
static void ht_init_nr_cpus_mask(void)
{
	long maxcpus;

	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		nr_cpus_mask = -2;
		return;
	}
	/*
	 * round up number of CPUs to next power of two, so we
	 * can use & for modulo.
	 */
	maxcpus = 1UL << cds_lfht_get_count_order_ulong(maxcpus);
	nr_cpus_mask = maxcpus - 1;
}
#else /* #if defined(HAVE_SYSCONF) */
static void ht_init_nr_cpus_mask(void)
{
	nr_cpus_mask = -2;
}
#endif /* #else #if defined(HAVE_SYSCONF) */

static
void alloc_split_items_count(struct cds_lfht *ht)
{
	struct ht_items_count *count;

	if (nr_cpus_mask == -1) {
		ht_init_nr_cpus_mask();
		if (nr_cpus_mask < 0)
			split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
		else
			split_count_mask = nr_cpus_mask;
	}

	assert(split_count_mask >= 0);

	if (ht->flags & CDS_LFHT_ACCOUNTING) {
		ht->split_count = calloc(split_count_mask + 1, sizeof(*count));
		assert(ht->split_count);
	} else {
		ht->split_count = NULL;
	}
}

static
void free_split_items_count(struct cds_lfht *ht)
{
	poison_free(ht->split_count);
}

#if defined(HAVE_SCHED_GETCPU)
static
int ht_get_split_count_index(unsigned long hash)
{
	int cpu;

	assert(split_count_mask >= 0);
	cpu = sched_getcpu();
	if (caa_unlikely(cpu < 0))
		return hash & split_count_mask;
	else
		return cpu & split_count_mask;
}
#else /* #if defined(HAVE_SCHED_GETCPU) */
static
int ht_get_split_count_index(unsigned long hash)
{
	return hash & split_count_mask;
}
#endif /* #else #if defined(HAVE_SCHED_GETCPU) */

static
void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
	unsigned long split_count;
	int index;
	long count;

	if (caa_unlikely(!ht->split_count))
		return;
	index = ht_get_split_count_index(hash);
	split_count = uatomic_add_return(&ht->split_count[index].add, 1);
	if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		return;
	/* Only if the number of adds is a multiple of 1UL << COUNT_COMMIT_ORDER */

	dbg_printf("add split count %lu\n", split_count);
	count = uatomic_add_return(&ht->count,
			1UL << COUNT_COMMIT_ORDER);
	if (caa_likely(count & (count - 1)))
		return;
	/* Only if global count is a power of 2 */

	if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
		return;
	dbg_printf("add set global %ld\n", count);
	cds_lfht_resize_lazy_count(ht, size,
		count >> (CHAIN_LEN_TARGET - 1));
}
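
/*
 * Example of the accounting above: with COUNT_COMMIT_ORDER == 10, each
 * split-counter index pushes 1024 onto the global ht->count once per
 * 1024 local additions, and a resize is only considered when the
 * resulting global count crosses a power of two.
 */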

static
void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
	unsigned long split_count;
	int index;
	long count;

	if (caa_unlikely(!ht->split_count))
		return;
	index = ht_get_split_count_index(hash);
	split_count = uatomic_add_return(&ht->split_count[index].del, 1);
	if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		return;
	/* Only if the number of deletes is a multiple of 1UL << COUNT_COMMIT_ORDER */

	dbg_printf("del split count %lu\n", split_count);
	count = uatomic_add_return(&ht->count,
			-(1UL << COUNT_COMMIT_ORDER));
	if (caa_likely(count & (count - 1)))
		return;
	/* Only if global count is a power of 2 */

	if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
		return;
	dbg_printf("del set global %ld\n", count);
	/*
	 * Don't shrink table if the number of nodes is below a
	 * certain threshold.
	 */
	if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
		return;
	cds_lfht_resize_lazy_count(ht, size,
		count >> (CHAIN_LEN_TARGET - 1));
}

static
void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
{
	unsigned long count;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = uatomic_read(&ht->count);
	/*
	 * Use bucket-local length for small table expand and for
	 * environments lacking per-cpu data support.
	 */
	if (count >= (1UL << COUNT_COMMIT_ORDER))
		return;
	if (chain_len > 100)
		dbg_printf("WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		cds_lfht_resize_lazy_grow(ht, size,
			cds_lfht_get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}
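
/*
 * For example, with CHAIN_LEN_TARGET == 1 and
 * CHAIN_LEN_RESIZE_THRESHOLD == 3, encountering a chain of length 3 in
 * a small table requests a growth of cds_lfht_get_count_order_u32(3)
 * == 2 orders, i.e. a table four times larger.
 */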

static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_bucket(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & BUCKET_FLAG;
}

static
struct cds_lfht_node *flag_bucket(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
}

static
int is_removal_owner(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
}

static
struct cds_lfht_node *flag_removal_owner(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
}

static
struct cds_lfht_node *get_end(void)
{
	return (struct cds_lfht_node *) END_VALUE;
}

static
int is_end(struct cds_lfht_node *node)
{
	return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
}

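/*
 * Advance *ptr to v if, and only if, v is greater than the current
 * value; otherwise leave *ptr untouched. In both cases, return the
 * prior value of *ptr. This lets concurrent resize requests race so
 * that only the largest requested target size wins.
 */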
static
unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
		unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return old2;
}

static
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	ht->mm->alloc_bucket_table(ht, order);
}

/*
 * cds_lfht_free_bucket_table() should be called with decreasing order.
 * When cds_lfht_free_bucket_table(0) is called, it means the whole
 * lfht is destroyed.
 */
static
void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	ht->mm->free_bucket_table(ht, order);
}

static inline
struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
{
	return ht->bucket_at(ht, index);
}

static inline
struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
		unsigned long hash)
{
	assert(size > 0);
	return bucket_at(ht, hash & (size - 1));
}

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _cds_lfht_gc_bucket(struct cds_lfht_node *bucket, struct cds_lfht_node *node)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;

	assert(!is_bucket(bucket));
	assert(!is_removed(bucket));
	assert(!is_bucket(node));
	assert(!is_removed(node));
	for (;;) {
		iter_prev = bucket;
		/* We can always skip the bucket node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(!is_removed(iter));
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		/*
		 * We should never be called with bucket (start of chain)
		 * and logically removed node (end of path compression
		 * marker) being the actual same node. This would be a
		 * bug in the algorithm implementation.
		 */
		assert(bucket != node);
		for (;;) {
			if (caa_unlikely(is_end(iter)))
				return;
			if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
				return;
			next = rcu_dereference(clear_flag(iter)->next);
			if (caa_unlikely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_bucket(iter))
			new_next = flag_bucket(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
	}
}

static
int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *old_node,
		struct cds_lfht_node *old_next,
		struct cds_lfht_node *new_node)
{
	struct cds_lfht_node *bucket, *ret_next;

	if (!old_node)	/* Return -ENOENT if asked to replace NULL node */
		return -ENOENT;

	assert(!is_removed(old_node));
	assert(!is_bucket(old_node));
	assert(!is_removed(new_node));
	assert(!is_bucket(new_node));
	assert(new_node != old_node);
	for (;;) {
		/* Insert after node to be replaced */
		if (is_removed(old_next)) {
			/*
			 * Too late, the old node has been removed under us
			 * between lookup and replace. Fail.
			 */
			return -ENOENT;
		}
		assert(old_next == clear_flag(old_next));
		assert(new_node != old_next);
		new_node->next = old_next;
		/*
		 * Here is the whole trick for lock-free replace: we add
		 * the replacement node _after_ the node we want to
		 * replace by atomically setting its next pointer at the
		 * same time we set its removal flag. Given that
		 * the lookups/get next use an iterator aware of the
		 * next pointer, they will either skip the old node due
		 * to the removal flag and see the new node, or use
		 * the old node, but will not see the new one.
		 * This is a replacement of a node with another node
		 * that has the same value: we are therefore not
		 * removing a value from the hash table.
		 */
		ret_next = uatomic_cmpxchg(&old_node->next,
			old_next, flag_removed(new_node));
		if (ret_next == old_next)
			break;		/* We performed the replacement. */
		old_next = ret_next;
	}

	/*
	 * Ensure that the old node is not visible to readers anymore:
	 * lookup for the node, and remove it (along with any other
	 * logically removed node) if found.
	 */
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
	_cds_lfht_gc_bucket(bucket, new_node);

	assert(is_removed(rcu_dereference(old_node->next)));
	return 0;
}

/*
 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
 * mode. A NULL unique_ret allows creation of duplicate keys.
 */
static
void _cds_lfht_add(struct cds_lfht *ht,
		unsigned long hash,
		cds_lfht_match_fct match,
		const void *key,
		unsigned long size,
		struct cds_lfht_node *node,
		struct cds_lfht_iter *unique_ret,
		int bucket_flag)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*return_node;
	struct cds_lfht_node *bucket;

	assert(!is_bucket(node));
	assert(!is_removed(node));
	bucket = lookup_bucket(ht, size, hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = bucket;
		/* We can always skip the bucket node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			if (caa_unlikely(is_end(iter)))
				goto insert;
			if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
				goto insert;

			/* bucket node is the first node of the identical-hash-value chain */
			if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
				goto insert;

			next = rcu_dereference(clear_flag(iter)->next);
			if (caa_unlikely(is_removed(next)))
				goto gc_node;

			/* uniquely add */
			if (unique_ret
			    && !is_bucket(next)
			    && clear_flag(iter)->reverse_hash == node->reverse_hash) {
				struct cds_lfht_iter d_iter = { .node = node, .next = iter, };

				/*
				 * uniquely adding inserts the node as the first
				 * node of the identical-hash-value node chain.
				 *
				 * This semantic ensures no duplicated keys
				 * should ever be observable in the table
				 * (including when observing the table node by
				 * node through forward iteration).
				 */
				cds_lfht_next_duplicate(ht, match, key, &d_iter);
				if (!d_iter.node)
					goto insert;

				*unique_ret = d_iter;
				return;
			}

			/* Only account for identical reverse hash once */
			if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash
			    && !is_bucket(next))
				check_resize(ht, size, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}

	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removed(iter));
		assert(iter_prev != node);
		if (!bucket_flag)
			node->next = clear_flag(iter);
		else
			node->next = flag_bucket(clear_flag(iter));
		if (is_bucket(iter))
			new_node = flag_bucket(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->next, iter,
				    new_node) != iter) {
			continue;	/* retry */
		} else {
			return_node = node;
			goto end;
		}

	gc_node:
		assert(!is_removed(iter));
		if (is_bucket(iter))
			new_next = flag_bucket(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
		/* retry */
	}
end:
	if (unique_ret) {
		unique_ret->node = return_node;
		/* unique_ret->next left unset, never used. */
	}
}

static
int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *node)
{
	struct cds_lfht_node *bucket, *next;

	if (!node)	/* Return -ENOENT if asked to delete NULL node */
		return -ENOENT;

	/* logically delete the node */
	assert(!is_bucket(node));
	assert(!is_removed(node));
	assert(!is_removal_owner(node));

	/*
	 * We are first checking if the node had previously been
	 * logically removed (this check is not atomic with setting the
	 * logical removal flag). Return -ENOENT if the node had
	 * previously been removed.
	 */
	next = rcu_dereference(node->next);
	if (caa_unlikely(is_removed(next)))
		return -ENOENT;
	assert(!is_bucket(next));
	/*
	 * We set the REMOVED_FLAG unconditionally. Note that there may
	 * be more than one concurrent thread setting this flag.
	 * Knowing which wins the race will be known after the garbage
	 * collection phase, stay tuned!
	 */
	uatomic_or(&node->next, REMOVED_FLAG);
	/* We performed the (logical) deletion. */

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
	_cds_lfht_gc_bucket(bucket, node);

	assert(is_removed(rcu_dereference(node->next)));
	/*
	 * Last phase: atomically exchange node->next with a version
	 * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
	 * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
	 * the node and win the removal race.
	 * It is interesting to note that all "add" paths are forbidden
	 * to change the next pointer starting from the point where the
	 * REMOVED_FLAG is set, so a read followed by a xchg() suffices
	 * to guarantee that the xchg() will only ever set the
	 * "REMOVAL_OWNER_FLAG" (or change nothing if the flag was
	 * already set).
	 */
	if (!is_removal_owner(uatomic_xchg(&node->next,
			flag_removal_owner(node->next))))
		return 0;
	else
		return -ENOENT;
}

static
void *partition_resize_thread(void *arg)
{
	struct partition_resize_work *work = arg;

	work->ht->flavor->register_thread();
	work->fct(work->ht, work->i, work->start, work->len);
	work->ht->flavor->unregister_thread();
	return NULL;
}

static
void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
		unsigned long len,
		void (*fct)(struct cds_lfht *ht, unsigned long i,
			unsigned long start, unsigned long len))
{
	unsigned long partition_len;
	struct partition_resize_work *work;
	int thread, ret;
	unsigned long nr_threads;

	/*
	 * Note: nr_cpus_mask + 1 is always power of 2.
	 * We spawn just the number of threads we need to satisfy the minimum
	 * partition size, up to the number of CPUs in the system.
	 */
	if (nr_cpus_mask > 0) {
		nr_threads = min(nr_cpus_mask + 1,
				 len >> MIN_PARTITION_PER_THREAD_ORDER);
	} else {
		nr_threads = 1;
	}
	partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
	work = calloc(nr_threads, sizeof(*work));
	assert(work);
	for (thread = 0; thread < nr_threads; thread++) {
		work[thread].ht = ht;
		work[thread].i = i;
		work[thread].len = partition_len;
		work[thread].start = thread * partition_len;
		work[thread].fct = fct;
		ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
			partition_resize_thread, &work[thread]);
		assert(!ret);
	}
	for (thread = 0; thread < nr_threads; thread++) {
		ret = pthread_join(work[thread].thread_id, NULL);
		assert(!ret);
	}
	free(work);
}
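
/*
 * Sizing example for the partitioning above (illustrative numbers): on
 * an 8-core system (nr_cpus_mask == 7), a resize step of len == 2^15
 * buckets spawns min(8, 2^15 >> 12) == 8 threads, each handling a
 * partition_len of 2^12 == 4096 buckets.
 */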

/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 *
 * When we reach a certain length, we can split this population phase over
 * many worker threads, based on the number of CPUs available in the system.
 * This should keep the expand from lagging too far behind concurrent
 * insertion threads, by relying on the scheduler's ability to schedule
 * bucket node population fairly with insertions.
 */
static
void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
				   unsigned long start, unsigned long len)
{
	unsigned long j, size = 1UL << (i - 1);

	assert(i > MIN_TABLE_ORDER);
	ht->flavor->read_lock();
	for (j = size + start; j < size + start + len; j++) {
		struct cds_lfht_node *new_node = bucket_at(ht, j);

		assert(j >= size && j < (size << 1));
		dbg_printf("init populate: order %lu index %lu hash %lu\n",
			   i, j, j);
		new_node->reverse_hash = bit_reverse_ulong(j);
		_cds_lfht_add(ht, j, NULL, NULL, size, new_node, NULL, 1);
	}
	ht->flavor->read_unlock();
}

static
void init_table_populate(struct cds_lfht *ht, unsigned long i,
			 unsigned long len)
{
	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->flavor->thread_online();
		init_table_populate_partition(ht, i, 0, len);
		ht->flavor->thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, init_table_populate_partition);
}

static
void init_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long last_order)
{
	unsigned long i;

	dbg_printf("init table: first_order %lu last_order %lu\n",
		   first_order, last_order);
	assert(first_order > MIN_TABLE_ORDER);
	for (i = first_order; i <= last_order; i++) {
		unsigned long len;

		len = 1UL << (i - 1);
		dbg_printf("init order %lu len: %lu\n", i, len);

		/* Stop expand if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->resize_target) < (1UL << i))
			break;

		cds_lfht_alloc_bucket_table(ht, i);

		/*
		 * Set all bucket nodes reverse hash values for a level and
		 * link all bucket nodes into the table.
		 */
		init_table_populate(ht, i, len);

		/*
		 * Update table size.
		 */
		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->size, 1UL << i);

		dbg_printf("init new size: %lu\n", 1UL << i);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
}

/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 * For a single level, we logically remove and garbage collect each node.
 *
 * As a design choice, we perform logical removal and garbage collection on a
 * node-per-node basis to simplify this algorithm. We also assume keeping good
 * cache locality of the operation would outweigh the possible performance
 * gain that could be achieved by batching garbage collection for multiple
 * levels. However, this would have to be justified by benchmarks.
 *
 * Concurrent removal and add operations are helping us perform garbage
 * collection of logically removed nodes. We guarantee that all logically
 * removed nodes have been garbage-collected (unlinked) before call_rcu is
 * invoked to free a whole level of bucket nodes (after a grace period).
 *
 * Logical removal and garbage collection can therefore be done in batch or on a
 * node-per-node basis, as long as the guarantee above holds.
 *
 * When we reach a certain length, we can split this removal over many worker
 * threads, based on the number of CPUs available in the system. This should
 * take care of not letting resize process lag behind too many concurrent
 * updater threads actively inserting into the hash table.
 */
static
void remove_table_partition(struct cds_lfht *ht, unsigned long i,
			    unsigned long start, unsigned long len)
{
	unsigned long j, size = 1UL << (i - 1);

	assert(i > MIN_TABLE_ORDER);
	ht->flavor->read_lock();
	for (j = size + start; j < size + start + len; j++) {
		struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
		struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);

		assert(j >= size && j < (size << 1));
		dbg_printf("remove entry: order %lu index %lu hash %lu\n",
			   i, j, j);
		/* Set the REMOVED_FLAG to freeze the ->next for gc */
		uatomic_or(&fini_bucket->next, REMOVED_FLAG);
		_cds_lfht_gc_bucket(parent_bucket, fini_bucket);
	}
	ht->flavor->read_unlock();
}

static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
{

	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->flavor->thread_online();
		remove_table_partition(ht, i, 0, len);
		ht->flavor->thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, remove_table_partition);
}

/*
 * fini_table() is never called for first_order == 0, which is why
 * free_by_rcu_order == 0 can be used as criterion to know if free must
 * be called.
 */
static
void fini_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long last_order)
{
	long i;
	unsigned long free_by_rcu_order = 0;

	dbg_printf("fini table: first_order %lu last_order %lu\n",
		   first_order, last_order);
	assert(first_order > MIN_TABLE_ORDER);
	for (i = last_order; i >= first_order; i--) {
		unsigned long len;

		len = 1UL << (i - 1);
		dbg_printf("fini order %lu len: %lu\n", i, len);

		/* Stop shrink if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->resize_target) > (1UL << (i - 1)))
			break;

		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->size, 1UL << (i - 1));

		/*
		 * We need to wait for all add operations to reach Q.S. (and
		 * thus use the new table for lookups) before we can start
		 * releasing the old bucket nodes. Otherwise their lookup will
		 * return a logically removed node as insert position.
		 */
		ht->flavor->update_synchronize_rcu();
		if (free_by_rcu_order)
			cds_lfht_free_bucket_table(ht, free_by_rcu_order);

		/*
		 * Set "removed" flag in bucket nodes about to be removed.
		 * Unlink all now-logically-removed bucket node pointers.
		 * Concurrent add/remove operation are helping us doing
		 * the gc.
		 */
		remove_table(ht, i, len);

		free_by_rcu_order = i;

		dbg_printf("fini new size: %lu\n", 1UL << i);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}

	if (free_by_rcu_order) {
		ht->flavor->update_synchronize_rcu();
		cds_lfht_free_bucket_table(ht, free_by_rcu_order);
	}
}

static
void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
{
	struct cds_lfht_node *prev, *node;
	unsigned long order, len, i;

	cds_lfht_alloc_bucket_table(ht, 0);

	dbg_printf("create bucket: order 0 index 0 hash 0\n");
	node = bucket_at(ht, 0);
	node->next = flag_bucket(get_end());
	node->reverse_hash = 0;

	for (order = 1; order < cds_lfht_get_count_order_ulong(size) + 1; order++) {
		len = 1UL << (order - 1);
		cds_lfht_alloc_bucket_table(ht, order);

		for (i = 0; i < len; i++) {
			/*
			 * We are initializing the bucket node with
			 * hash=(len+i) (which is also the bucket with
			 * index=(len+i)) and inserting it into the hash
			 * table. It therefore needs to be inserted after
			 * the bucket with index=(len+i)&(len-1)=i. Since
			 * no other node, bucket or not, with a larger
			 * index/hash has been inserted yet, the new
			 * bucket node can be linked directly after the
			 * bucket node with index=i.
			 */
			prev = bucket_at(ht, i);
			node = bucket_at(ht, len + i);

			dbg_printf("create bucket: order %lu index %lu hash %lu\n",
				   order, len + i, len + i);
			node->reverse_hash = bit_reverse_ulong(len + i);

			/* insert after prev */
			assert(is_bucket(prev->next));
			node->next = prev->next;
			prev->next = flag_bucket(node);
		}
	}
}

struct cds_lfht *_cds_lfht_new(unsigned long init_size,
			unsigned long min_nr_alloc_buckets,
			unsigned long max_nr_buckets,
			int flags,
			const struct cds_lfht_mm_type *mm,
			const struct rcu_flavor_struct *flavor,
			pthread_attr_t *attr)
{
	struct cds_lfht *ht;
	unsigned long order;

	/* min_nr_alloc_buckets must be power of two */
	if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
		return NULL;

	/* init_size must be power of two */
	if (!init_size || (init_size & (init_size - 1)))
		return NULL;

	/*
	 * Memory management plugin default.
	 */
	if (!mm) {
		if (CAA_BITS_PER_LONG > 32
				&& max_nr_buckets
				&& max_nr_buckets <= (1ULL << 32)) {
			/*
			 * For 64-bit architectures, with max number of
			 * buckets small enough not to use the entire
			 * 64-bit memory mapping space (and allowing a
			 * fair number of hash table instances), use the
			 * mmap allocator, which is faster than the
			 * order allocator.
			 */
			mm = &cds_lfht_mm_mmap;
		} else {
			/*
			 * The fallback is to use the order allocator.
			 */
			mm = &cds_lfht_mm_order;
		}
	}

	/* max_nr_buckets == 0 for order based mm means infinite */
	if (mm == &cds_lfht_mm_order && !max_nr_buckets)
		max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);

	/* max_nr_buckets must be power of two */
	if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
		return NULL;

	min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
	init_size = max(init_size, MIN_TABLE_SIZE);
	max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
	init_size = min(init_size, max_nr_buckets);

	ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets);
	assert(ht);
	assert(ht->mm == mm);
	assert(ht->bucket_at == mm->bucket_at);

	ht->flags = flags;
	ht->flavor = flavor;
	ht->resize_attr = attr;
	alloc_split_items_count(ht);
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = cds_lfht_get_count_order_ulong(init_size);
	ht->resize_target = 1UL << order;
	cds_lfht_create_bucket(ht, 1UL << order);
	ht->size = 1UL << order;
	return ht;
}

void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
		cds_lfht_match_fct match, const void *key,
		struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next, *bucket;
	unsigned long reverse_hash, size;

	reverse_hash = bit_reverse_ulong(hash);

	size = rcu_dereference(ht->size);
	bucket = lookup_bucket(ht, size, hash);
	/* We can always skip the bucket node initially */
	node = rcu_dereference(bucket->next);
	node = clear_flag(node);
	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (caa_unlikely(node->reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->next);
		assert(node == clear_flag(node));
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)
		    && node->reverse_hash == reverse_hash
		    && caa_likely(match(node, key))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(rcu_dereference(node->next)));
	iter->node = node;
	iter->next = next;
}
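
/*
 * A typical caller embeds struct cds_lfht_node into its own structure
 * and maps the iterator back to it with caa_container_of(). A minimal
 * sketch (struct mynode, my_match and key are user-provided, not part
 * of this file):
 *
 *	struct mynode {
 *		int key;
 *		struct cds_lfht_node node;
 *	};
 *
 *	cds_lfht_lookup(ht, hash, my_match, &key, &iter);
 *	if (cds_lfht_iter_get_node(&iter))
 *		found = caa_container_of(cds_lfht_iter_get_node(&iter),
 *				struct mynode, node);
 */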

void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
		const void *key, struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next;
	unsigned long reverse_hash;

	node = iter->node;
	reverse_hash = node->reverse_hash;
	next = iter->next;
	node = clear_flag(next);

	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (caa_unlikely(node->reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->next);
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)
		    && caa_likely(match(node, key))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(rcu_dereference(node->next)));
	iter->node = node;
	iter->next = next;
}

void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	struct cds_lfht_node *node, *next;

	node = clear_flag(iter->next);
	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		next = rcu_dereference(node->next);
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(rcu_dereference(node->next)));
	iter->node = node;
	iter->next = next;
}

void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	/*
	 * Get next after first bucket node. The first bucket node is the
	 * first node of the linked list.
	 */
	iter->next = bucket_at(ht, 0)->next;
	cds_lfht_next(ht, iter);
}

void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
		struct cds_lfht_node *node)
{
	unsigned long size;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = rcu_dereference(ht->size);
	_cds_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
	ht_count_add(ht, size, hash);
}

struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
				unsigned long hash,
				cds_lfht_match_fct match,
				const void *key,
				struct cds_lfht_node *node)
{
	unsigned long size;
	struct cds_lfht_iter iter;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = rcu_dereference(ht->size);
	_cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
	if (iter.node == node)
		ht_count_add(ht, size, hash);
	return iter.node;
}

struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
				unsigned long hash,
				cds_lfht_match_fct match,
				const void *key,
				struct cds_lfht_node *node)
{
	unsigned long size;
	struct cds_lfht_iter iter;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = rcu_dereference(ht->size);
	for (;;) {
		_cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
		if (iter.node == node) {
			ht_count_add(ht, size, hash);
			return NULL;
		}

		if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node))
			return iter.node;
	}
}

int cds_lfht_replace(struct cds_lfht *ht,
		struct cds_lfht_iter *old_iter,
		unsigned long hash,
		cds_lfht_match_fct match,
		const void *key,
		struct cds_lfht_node *new_node)
{
	unsigned long size;

	new_node->reverse_hash = bit_reverse_ulong(hash);
	if (!old_iter->node)
		return -ENOENT;
	if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
		return -EINVAL;
	if (caa_unlikely(!match(old_iter->node, key)))
		return -EINVAL;
	size = rcu_dereference(ht->size);
	return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
			new_node);
}

int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	unsigned long size, hash;
	int ret;

	size = rcu_dereference(ht->size);
	ret = _cds_lfht_del(ht, size, node);
	if (!ret) {
		hash = bit_reverse_ulong(node->reverse_hash);
		ht_count_del(ht, size, hash);
	}
	return ret;
}

int cds_lfht_is_node_deleted(struct cds_lfht_node *node)
{
	return is_removed(rcu_dereference(node->next));
}

static
int cds_lfht_delete_bucket(struct cds_lfht *ht)
{
	struct cds_lfht_node *node;
	unsigned long order, i, size;

	/* Check that the table is empty */
	node = bucket_at(ht, 0);
	do {
		node = clear_flag(node)->next;
		if (!is_bucket(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (!is_end(node));
	/*
	 * size accessed without rcu_dereference because hash table is
	 * being destroyed.
	 */
	size = ht->size;
	/* Internal sanity check: all nodes left should be buckets */
	for (i = 0; i < size; i++) {
		node = bucket_at(ht, i);
		dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
			   i, i, bit_reverse_ulong(node->reverse_hash));
		assert(is_bucket(node->next));
	}

	for (order = cds_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
		cds_lfht_free_bucket_table(ht, order);

	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	cmm_smp_mb();	/* Store destroy before load resize */
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = cds_lfht_delete_bucket(ht);
	if (ret)
		return ret;
	free_split_items_count(ht);
	if (attr)
		*attr = ht->resize_attr;
	poison_free(ht);
	return ret;
}

void cds_lfht_count_nodes(struct cds_lfht *ht,
		long *approx_before,
		unsigned long *count,
		long *approx_after)
{
	struct cds_lfht_node *node, *next;
	unsigned long nr_bucket = 0, nr_removed = 0;

	*approx_before = 0;
	if (ht->split_count) {
		int i;

		for (i = 0; i < split_count_mask + 1; i++) {
			*approx_before += uatomic_read(&ht->split_count[i].add);
			*approx_before -= uatomic_read(&ht->split_count[i].del);
		}
	}

	*count = 0;

	/* Count non-bucket nodes in the table */
	node = bucket_at(ht, 0);
	do {
		next = rcu_dereference(node->next);
		if (is_removed(next)) {
			if (!is_bucket(next))
				(nr_removed)++;
			else
				(nr_bucket)++;
		} else if (!is_bucket(next))
			(*count)++;
		else
			(nr_bucket)++;
		node = clear_flag(next);
	} while (!is_end(node));
	dbg_printf("number of logically removed nodes: %lu\n", nr_removed);
	dbg_printf("number of bucket nodes: %lu\n", nr_bucket);
	*approx_after = 0;
	if (ht->split_count) {
		int i;

		for (i = 0; i < split_count_mask + 1; i++) {
			*approx_after += uatomic_read(&ht->split_count[i].add);
			*approx_after -= uatomic_read(&ht->split_count[i].del);
		}
	}
}

/* called with resize mutex held */
static
void _do_cds_lfht_grow(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	old_order = cds_lfht_get_count_order_ulong(old_size);
	new_order = cds_lfht_get_count_order_ulong(new_size);
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size > old_size);
	init_table(ht, old_order + 1, new_order);
}

/* called with resize mutex held */
static
void _do_cds_lfht_shrink(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	new_size = max(new_size, MIN_TABLE_SIZE);
	old_order = cds_lfht_get_count_order_ulong(old_size);
	new_order = cds_lfht_get_count_order_ulong(new_size);
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size < old_size);

	/* Remove and unlink all bucket nodes to remove. */
	fini_table(ht, new_order + 1, old_order);
}

/* called with resize mutex held */
static
void _do_cds_lfht_resize(struct cds_lfht *ht)
{
	unsigned long new_size, old_size;

	/*
	 * Resize table, re-do if the target size has changed under us.
	 */
	do {
		assert(uatomic_read(&ht->in_progress_resize));
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
		ht->resize_initiated = 1;
		old_size = ht->size;
		new_size = CMM_LOAD_SHARED(ht->resize_target);
		if (old_size < new_size)
			_do_cds_lfht_grow(ht, old_size, new_size);
		else if (old_size > new_size)
			_do_cds_lfht_shrink(ht, old_size, new_size);
		ht->resize_initiated = 0;
		/* write resize_initiated before read resize_target */
		cmm_smp_mb();
	} while (ht->size != CMM_LOAD_SHARED(ht->resize_target));
}

static
unsigned long resize_target_grow(struct cds_lfht *ht, unsigned long new_size)
{
	return _uatomic_xchg_monotonic_increase(&ht->resize_target, new_size);
}

static
void resize_target_update_count(struct cds_lfht *ht,
				unsigned long count)
{
	count = max(count, MIN_TABLE_SIZE);
	count = min(count, ht->max_nr_buckets);
	uatomic_set(&ht->resize_target, count);
}

void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
	resize_target_update_count(ht, new_size);
	CMM_STORE_SHARED(ht->resize_initiated, 1);
	ht->flavor->thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->flavor->thread_online();
}
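
/*
 * Usage sketch for the explicit resize above: a caller expecting about
 * a million entries can request the capacity up front, outside of any
 * read-side critical section:
 *
 *	cds_lfht_resize(ht, 1UL << 20);
 */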

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct cds_lfht *ht = work->ht;

	ht->flavor->thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->flavor->thread_online();
	poison_free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}

static
void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
{
	struct rcu_resize_work *work;

	/* Store resize_target before read resize_initiated */
	cmm_smp_mb();
	if (!CMM_LOAD_SHARED(ht->resize_initiated)) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before load destroy */
		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
			uatomic_dec(&ht->in_progress_resize);
			return;
		}
		work = malloc(sizeof(*work));
		if (work == NULL) {
			dbg_printf("error allocating resize work, bailing out\n");
			uatomic_dec(&ht->in_progress_resize);
			return;
		}
		work->ht = ht;
		ht->flavor->update_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(ht->resize_initiated, 1);
	}
}

static
void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
{
	unsigned long target_size = size << growth;

	target_size = min(target_size, ht->max_nr_buckets);
	if (resize_target_grow(ht, target_size) >= target_size)
		return;

	__cds_lfht_resize_lazy_launch(ht);
}

/*
 * We favor grow operations over shrink. A shrink operation never occurs
 * if a grow operation is queued for lazy execution. A grow operation
 * cancels any pending shrink lazy execution.
 */
static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count)
{
	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = max(count, MIN_TABLE_SIZE);
	count = min(count, ht->max_nr_buckets);
	if (count == size)
		return;		/* Already the right size, no resize needed */
	if (count > size) {	/* lazy grow */
		if (resize_target_grow(ht, count) >= count)
			return;
	} else {		/* lazy shrink */
		for (;;) {
			unsigned long s;

			s = uatomic_cmpxchg(&ht->resize_target, size, count);
			if (s == size)
				break;	/* no resize needed */
			if (s > size)
				return;	/* growing is, or was just, in progress */
			if (s <= count)
				return;	/* some other thread already did the shrink */
			size = s;
		}
	}
	__cds_lfht_resize_lazy_launch(ht);
}