1 /*
2 * rculfhash.c
3 *
4 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
5 *
6 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24 /*
25 * Based on the following articles:
26 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
27 * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
28 * - Michael, M. M. High performance dynamic lock-free hash tables
29 * and list-based sets. In Proceedings of the fourteenth annual ACM
30 * symposium on Parallel algorithms and architectures, ACM Press,
31 * (2002), 73-82.
32 *
33 * Some specificities of this Lock-Free Resizable RCU Hash Table
34 * implementation:
35 *
36 * - RCU read-side critical section allows readers to perform hash
37 * table lookups, as well as traversals, and use the returned objects
38 * safely by allowing memory reclaim to take place only after a grace
39 * period.
40 * - Add and remove operations are lock-free, and do not need to
41 * allocate memory. They need to be executed within RCU read-side
42 * critical section to ensure the objects they read are valid and to
43 * deal with the cmpxchg ABA problem.
 44  * - add and add_unique operations are supported. add_unique checks
 45  *   whether the node key is already present in the hash table, and
 46  *   ensures that a duplicate key is not inserted if the key already
 47  *   exists.
48 * - The resize operation executes concurrently with
49 * add/add_unique/add_replace/remove/lookup/traversal.
50 * - Hash table nodes are contained within a split-ordered list. This
51 * list is ordered by incrementing reversed-bits-hash value.
 52  * - An index of bucket nodes is kept. These bucket nodes are the hash
 53  *   table "buckets". These buckets are internal nodes that allow
 54  *   performing a fast hash lookup, similarly to a skip list. These
 55  *   buckets are chained together in the split-ordered list, which
 56  *   allows recursive expansion by inserting new buckets between the
 57  *   existing buckets: the split-ordered list lets new buckets be added
 58  *   between existing ones as the table needs to grow.
59 * - The resize operation for small tables only allows expanding the
60 * hash table. It is triggered automatically by detecting long chains
61 * in the add operation.
62 * - The resize operation for larger tables (and available through an
63 * API) allows both expanding and shrinking the hash table.
64 * - Split-counters are used to keep track of the number of
65 * nodes within the hash table for automatic resize triggering.
66 * - Resize operation initiated by long chain detection is executed by a
67 * call_rcu thread, which keeps lock-freedom of add and remove.
68 * - Resize operations are protected by a mutex.
69 * - The removal operation is split in two parts: first, a "removed"
70 * flag is set in the next pointer within the node to remove. Then,
71 * a "garbage collection" is performed in the bucket containing the
72 * removed node (from the start of the bucket up to the removed node).
73 * All encountered nodes with "removed" flag set in their next
74 * pointers are removed from the linked-list. If the cmpxchg used for
75 * removal fails (due to concurrent garbage-collection or concurrent
76 * add), we retry from the beginning of the bucket. This ensures that
77 * the node with "removed" flag set is removed from the hash table
78 * (not visible to lookups anymore) before the RCU read-side critical
79 * section held across removal ends. Furthermore, this ensures that
80 * the node with "removed" flag set is removed from the linked-list
 81  *   before its memory is reclaimed. After setting the "removed" flag,
 82  *   only the thread whose removal is the first to set the "removal
 83  *   owner" flag (with an xchg) in the node's next pointer is considered
 84  *   to have succeeded its removal (and thus owns the node to reclaim).
85 * Because we garbage-collect starting from an invariant node (the
86 * start-of-bucket bucket node) up to the "removed" node (or find a
87 * reverse-hash that is higher), we are sure that a successful
88 * traversal of the chain leads to a chain that is present in the
89 * linked-list (the start node is never removed) and that it does not
90 * contain the "removed" node anymore, even if concurrent delete/add
91 * operations are changing the structure of the list concurrently.
92 * - The add operations perform garbage collection of buckets if they
93 * encounter nodes with removed flag set in the bucket where they want
 94  *   to add their new node. This ensures lock-freedom of the add
 95  *   operation by helping the remover unlink nodes from the list rather
 96  *   than waiting for it to do so.
97 * - There are three memory backends for the hash table buckets: the
98 * "order table", the "chunks", and the "mmap".
99 * - These bucket containers contain a compact version of the hash table
100 * nodes.
101 * - The RCU "order table":
102 * - has a first level table indexed by log2(hash index) which is
103 * copied and expanded by the resize operation. This order table
104 * allows finding the "bucket node" tables.
105 * - There is one bucket node table per hash index order. The size of
106 * each bucket node table is half the number of hashes contained in
107 * this order (except for order 0).
 108 * - The RCU "chunks" backend is best suited for close interaction with
 109 *   a page allocator. It uses a linear array as an index to "chunks",
 110 *   each containing the same number of buckets.
111 * - The RCU "mmap" memory backend uses a single memory map to hold
112 * all buckets.
113 * - synchronize_rcu is used to garbage-collect the old bucket node table.
114 *
115 * Ordering Guarantees:
116 *
 117 * To discuss these guarantees, we first define a "read" operation as
 118 * any of the basic cds_lfht_lookup, cds_lfht_next_duplicate,
 119 * cds_lfht_first and cds_lfht_next operations, as well as
 120 * cds_lfht_add_unique (failure).
121 *
 122 * We define a "read traversal" operation as any of the following
 123 * groups of operations:
124 * - cds_lfht_lookup followed by iteration with cds_lfht_next_duplicate
125 * (and/or cds_lfht_next, although less common).
126 * - cds_lfht_add_unique (failure) followed by iteration with
127 * cds_lfht_next_duplicate (and/or cds_lfht_next, although less
128 * common).
 129 * - cds_lfht_first followed by iteration with cds_lfht_next (and/or
130 * cds_lfht_next_duplicate, although less common).
131 *
132 * We define "write" operations as any of cds_lfht_add, cds_lfht_replace,
133 * cds_lfht_add_unique (success), cds_lfht_add_replace, cds_lfht_del.
134 *
135 * When cds_lfht_add_unique succeeds (returns the node passed as
136 * parameter), it acts as a "write" operation. When cds_lfht_add_unique
137 * fails (returns a node different from the one passed as parameter), it
 138 * acts as a "read" operation. A cds_lfht_add_unique failure is a
 139 * cds_lfht_lookup "read" operation; therefore, any ordering guarantee
 140 * referring to "lookup" applies to either "lookup" or
 141 * cds_lfht_add_unique (failure).
142 *
143 * We define "prior" and "later" node as nodes observable by reads and
144 * read traversals respectively before and after a write or sequence of
145 * write operations.
146 *
147 * Hash-table operations are often cascaded, for example, the pointer
148 * returned by a cds_lfht_lookup() might be passed to a cds_lfht_next(),
149 * whose return value might in turn be passed to another hash-table
150 * operation. This entire cascaded series of operations must be enclosed
151 * by a pair of matching rcu_read_lock() and rcu_read_unlock()
152 * operations.
153 *
154 * The following ordering guarantees are offered by this hash table:
155 *
156 * A.1) "read" after "write": if there is ordering between a write and a
157 * later read, then the read is guaranteed to see the write or some
158 * later write.
159 * A.2) "read traversal" after "write": given that there is dependency
160 * ordering between reads in a "read traversal", if there is
161 * ordering between a write and the first read of the traversal,
162 * then the "read traversal" is guaranteed to see the write or
163 * some later write.
164 * B.1) "write" after "read": if there is ordering between a read and a
165 * later write, then the read will never see the write.
166 * B.2) "write" after "read traversal": given that there is dependency
167 * ordering between reads in a "read traversal", if there is
168 * ordering between the last read of the traversal and a later
169 * write, then the "read traversal" will never see the write.
170 * C) "write" while "read traversal": if a write occurs during a "read
171 * traversal", the traversal may, or may not, see the write.
172 * D.1) "write" after "write": if there is ordering between a write and
173 * a later write, then the later write is guaranteed to see the
174 * effects of the first write.
175 * D.2) Concurrent "write" pairs: The system will assign an arbitrary
176 * order to any pair of concurrent conflicting writes.
177 * Non-conflicting writes (for example, to different keys) are
178 * unordered.
179 * E) If a grace period separates a "del" or "replace" operation
180 * and a subsequent operation, then that subsequent operation is
181 * guaranteed not to see the removed item.
182 * F) Uniqueness guarantee: given a hash table that does not contain
183 * duplicate items for a given key, there will only be one item in
184 * the hash table after an arbitrary sequence of add_unique and/or
185 * add_replace operations. Note, however, that a pair of
186 * concurrent read operations might well access two different items
187 * with that key.
188 * G.1) If a pair of lookups for a given key are ordered (e.g. by a
189 * memory barrier), then the second lookup will return the same
190 * node as the previous lookup, or some later node.
191 * G.2) A "read traversal" that starts after the end of a prior "read
192 * traversal" (ordered by memory barriers) is guaranteed to see the
193 * same nodes as the previous traversal, or some later nodes.
194 * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
195 * example, if a pair of reads to the same key run concurrently
196 * with an insertion of that same key, the reads remain unordered
197 * regardless of their return values. In other words, you cannot
198 * rely on the values returned by the reads to deduce ordering.
199 *
200 * Progress guarantees:
201 *
202 * * Reads are wait-free. These operations always move forward in the
203 * hash table linked list, and this list has no loop.
204 * * Writes are lock-free. Any retry loop performed by a write operation
205 * is triggered by progress made within another update operation.
206 *
207 * Bucket node tables:
208 *
 209 * hash table  hash table  the last        all bucket node tables
 210 * order       size        bucket node     0  1  2  3  4  5  6(index)
 211 *                         table size
 212 * 0           1           1               1
 213 * 1           2           1               1  1
 214 * 2           4           2               1  1  2
 215 * 3           8           4               1  1  2  4
 216 * 4           16          8               1  1  2  4  8
 217 * 5           32          16              1  1  2  4  8  16
 218 * 6           64          32              1  1  2  4  8  16 32
219 *
 220 * When growing/shrinking, we only focus on the last bucket node table,
 221 * whose size is (!order ? 1 : (1 << (order - 1))).
222 *
223 * Example for growing/shrinking:
224 * grow hash table from order 5 to 6: init the index=6 bucket node table
225 * shrink hash table from order 6 to 5: fini the index=6 bucket node table
226 *
227 * A bit of ascii art explanation:
228 *
 229 * The order index is off by one compared to the actual power of 2,
 230 * because we use index 0 to deal with the 0 special-case.
231 *
232 * This shows the nodes for a small table ordered by reversed bits:
233 *
 234 * bits      reverse
 235 * 0  000        000
 236 * 4  100        001
 237 * 2  010        010
 238 * 6  110        011
 239 * 1  001        100
 240 * 5  101        101
 241 * 3  011        110
 242 * 7  111        111
243 *
244 * This shows the nodes in order of non-reversed bits, linked by
245 * reversed-bit order.
246 *
 247 * order              bits   reverse
 248 * 0                  0  000        000
 249 * 1                  |  1  001        100             <-
 250 * 2                  |  |  2  010        010    <-     |
 251 *                    |  |  |  3  011        110  | <-  |
 252 * 3                  -> |  |  |  4  100        001  |  |
 253 *                        -> |  |  5  101        101  |
 254 *                           -> |  6  110        011
 255 *                              ->  7  111        111
256 */
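
/*
 * Illustrative usage sketch (not from the original source, kept out of
 * compilation with #if 0): the "read" and "read traversal" operations
 * defined above must be enclosed in a single RCU read-side critical
 * section. The struct mynode type, mynode_match() and the way the hash
 * value is obtained are hypothetical user-side code; only the
 * cds_lfht_* calls belong to the public API implemented here.
 */
#if 0
struct mynode {
	int key;
	struct cds_lfht_node node;	/* chaining within the hash table */
};

static int mynode_match(struct cds_lfht_node *ht_node, const void *key)
{
	struct mynode *n = caa_container_of(ht_node, struct mynode, node);

	return n->key == *(const int *) key;
}

static struct mynode *mynode_lookup(struct cds_lfht *ht, int key,
		unsigned long hash)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *ht_node;
	struct mynode *ret = NULL;

	rcu_read_lock();	/* whole cascaded lookup under one C.S. */
	cds_lfht_lookup(ht, hash, mynode_match, &key, &iter);
	ht_node = cds_lfht_iter_get_node(&iter);
	if (ht_node)
		ret = caa_container_of(ht_node, struct mynode, node);
	rcu_read_unlock();
	return ret;
}
#endif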
257
258 #define _LGPL_SOURCE
259 #define _GNU_SOURCE
260 #include <stdlib.h>
261 #include <errno.h>
262 #include <assert.h>
263 #include <stdio.h>
264 #include <stdint.h>
265 #include <string.h>
266 #include <sched.h>
267
268 #include "config.h"
269 #include <urcu.h>
270 #include <urcu-call-rcu.h>
271 #include <urcu-flavor.h>
272 #include <urcu/arch.h>
273 #include <urcu/uatomic.h>
274 #include <urcu/compiler.h>
275 #include <urcu/rculfhash.h>
276 #include <rculfhash-internal.h>
277 #include <stdio.h>
278 #include <pthread.h>
279
280 /*
 281  * Split-counters lazily update the global counter every 1024
 282  * additions/removals, and automatically keep track of whether a resize
 283  * is required. We use the bucket length as an indicator of the need to
 284  * expand for small tables and for machines lacking per-cpu data support.
 285  */
286 #define COUNT_COMMIT_ORDER 10
287 #define DEFAULT_SPLIT_COUNT_MASK 0xFUL
288 #define CHAIN_LEN_TARGET 1
289 #define CHAIN_LEN_RESIZE_THRESHOLD 3
290
291 /*
292 * Define the minimum table size.
293 */
294 #define MIN_TABLE_ORDER 0
295 #define MIN_TABLE_SIZE (1UL << MIN_TABLE_ORDER)
296
297 /*
298 * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
299 */
300 #define MIN_PARTITION_PER_THREAD_ORDER 12
301 #define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
302
303 /*
304 * The removed flag needs to be updated atomically with the pointer.
305 * It indicates that no node must attach to the node scheduled for
306 * removal, and that node garbage collection must be performed.
 307  * The bucket flag does not need to be updated atomically with the
 308  * pointer, but it is added as a pointer low bit flag to save space.
 309  * The "removal owner" flag is used to detect which of the "del"
 310  * operations that set the "removed" flag gets to return the removed
 311  * node to its caller. Note that the replace operation does not need to
 312  * interact with the "removal owner" flag, because it validates that
313 * the "removed" flag is not set before performing its cmpxchg.
314 */
315 #define REMOVED_FLAG (1UL << 0)
316 #define BUCKET_FLAG (1UL << 1)
317 #define REMOVAL_OWNER_FLAG (1UL << 2)
318 #define FLAGS_MASK ((1UL << 3) - 1)
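
/*
 * Illustrative note (not from the original source): cds_lfht_node
 * structures must be aligned on 8-byte boundaries (see
 * urcu/rculfhash.h), so the three low-order bits of a next pointer are
 * always zero and are free to carry the flags above. For example,
 * assuming a node at address 0x1000:
 *
 *   next = node | REMOVED_FLAG    -> 0x1001, node is logically removed
 *   clear_flag(next)              -> 0x1000, usable pointer again
 *   is_removed(next)              -> non-zero
 *   is_bucket(next)               -> zero (BUCKET_FLAG not set)
 */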
319
320 /* Value of the end pointer. Should not interact with flags. */
321 #define END_VALUE NULL
322
323 /*
 324  * ht_items_count: Split-counters counting the number of node additions
 325  * and removals in the table. Only used if the CDS_LFHT_ACCOUNTING flag
 326  * is set at hash table creation.
327 *
328 * These are free-running counters, never reset to zero. They count the
329 * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
330 * operations to update the global counter. We choose a power-of-2 value
331 * for the trigger to deal with 32 or 64-bit overflow of the counter.
332 */
333 struct ht_items_count {
334 unsigned long add, del;
335 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
336
337 /*
338 * rcu_resize_work: Contains arguments passed to RCU worker thread
339 * responsible for performing lazy resize.
340 */
341 struct rcu_resize_work {
342 struct rcu_head head;
343 struct cds_lfht *ht;
344 };
345
346 /*
347 * partition_resize_work: Contains arguments passed to worker threads
348 * executing the hash table resize on partitions of the hash table
349 * assigned to each processor's worker thread.
350 */
351 struct partition_resize_work {
352 pthread_t thread_id;
353 struct cds_lfht *ht;
354 unsigned long i, start, len;
355 void (*fct)(struct cds_lfht *ht, unsigned long i,
356 unsigned long start, unsigned long len);
357 };
358
359 /*
360 * Algorithm to reverse bits in a word by lookup table, extended to
361 * 64-bit words.
362 * Source:
363 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 364  * Originally in the public domain.
365 */
366
367 static const uint8_t BitReverseTable256[256] =
368 {
369 #define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
370 #define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
371 #define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
372 R6(0), R6(2), R6(1), R6(3)
373 };
374 #undef R2
375 #undef R4
376 #undef R6
377
378 static
379 uint8_t bit_reverse_u8(uint8_t v)
380 {
381 return BitReverseTable256[v];
382 }
383
384 #if (CAA_BITS_PER_LONG == 32)
385 static
386 uint32_t bit_reverse_u32(uint32_t v)
387 {
388 return ((uint32_t) bit_reverse_u8(v) << 24) |
389 ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
390 ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
391 ((uint32_t) bit_reverse_u8(v >> 24));
392 }
393 #else
394 static
395 uint64_t bit_reverse_u64(uint64_t v)
396 {
397 return ((uint64_t) bit_reverse_u8(v) << 56) |
398 ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
399 ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
400 ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
401 ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
402 ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
403 ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
404 ((uint64_t) bit_reverse_u8(v >> 56));
405 }
406 #endif
407
408 static
409 unsigned long bit_reverse_ulong(unsigned long v)
410 {
411 #if (CAA_BITS_PER_LONG == 32)
412 return bit_reverse_u32(v);
413 #else
414 return bit_reverse_u64(v);
415 #endif
416 }
417
418 /*
419 * fls: returns the position of the most significant bit.
420 * Returns 0 if no bit is set, else returns the position of the most
421 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
422 */
423 #if defined(__i386) || defined(__x86_64)
424 static inline
425 unsigned int fls_u32(uint32_t x)
426 {
427 int r;
428
429 asm("bsrl %1,%0\n\t"
430 "jnz 1f\n\t"
431 "movl $-1,%0\n\t"
432 "1:\n\t"
433 : "=r" (r) : "rm" (x));
434 return r + 1;
435 }
436 #define HAS_FLS_U32
437 #endif
438
439 #if defined(__x86_64)
440 static inline
441 unsigned int fls_u64(uint64_t x)
442 {
443 long r;
444
445 asm("bsrq %1,%0\n\t"
446 "jnz 1f\n\t"
447 "movq $-1,%0\n\t"
448 "1:\n\t"
449 : "=r" (r) : "rm" (x));
450 return r + 1;
451 }
452 #define HAS_FLS_U64
453 #endif
454
455 #ifndef HAS_FLS_U64
456 static __attribute__((unused))
457 unsigned int fls_u64(uint64_t x)
458 {
459 unsigned int r = 64;
460
461 if (!x)
462 return 0;
463
464 if (!(x & 0xFFFFFFFF00000000ULL)) {
465 x <<= 32;
466 r -= 32;
467 }
468 if (!(x & 0xFFFF000000000000ULL)) {
469 x <<= 16;
470 r -= 16;
471 }
472 if (!(x & 0xFF00000000000000ULL)) {
473 x <<= 8;
474 r -= 8;
475 }
476 if (!(x & 0xF000000000000000ULL)) {
477 x <<= 4;
478 r -= 4;
479 }
480 if (!(x & 0xC000000000000000ULL)) {
481 x <<= 2;
482 r -= 2;
483 }
484 if (!(x & 0x8000000000000000ULL)) {
485 x <<= 1;
486 r -= 1;
487 }
488 return r;
489 }
490 #endif
491
492 #ifndef HAS_FLS_U32
493 static __attribute__((unused))
494 unsigned int fls_u32(uint32_t x)
495 {
496 unsigned int r = 32;
497
498 if (!x)
499 return 0;
500 if (!(x & 0xFFFF0000U)) {
501 x <<= 16;
502 r -= 16;
503 }
504 if (!(x & 0xFF000000U)) {
505 x <<= 8;
506 r -= 8;
507 }
508 if (!(x & 0xF0000000U)) {
509 x <<= 4;
510 r -= 4;
511 }
512 if (!(x & 0xC0000000U)) {
513 x <<= 2;
514 r -= 2;
515 }
516 if (!(x & 0x80000000U)) {
517 x <<= 1;
518 r -= 1;
519 }
520 return r;
521 }
522 #endif
523
524 unsigned int cds_lfht_fls_ulong(unsigned long x)
525 {
526 #if (CAA_BITS_PER_LONG == 32)
527 return fls_u32(x);
528 #else
529 return fls_u64(x);
530 #endif
531 }
532
533 /*
534 * Return the minimum order for which x <= (1UL << order).
535 * Return -1 if x is 0.
536 */
537 int cds_lfht_get_count_order_u32(uint32_t x)
538 {
539 if (!x)
540 return -1;
541
542 return fls_u32(x - 1);
543 }
544
545 /*
546 * Return the minimum order for which x <= (1UL << order).
547 * Return -1 if x is 0.
548 */
549 int cds_lfht_get_count_order_ulong(unsigned long x)
550 {
551 if (!x)
552 return -1;
553
554 return cds_lfht_fls_ulong(x - 1);
555 }
556
557 static
558 void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);
559
560 static
561 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
562 unsigned long count);
563
564 static long nr_cpus_mask = -1;
565 static long split_count_mask = -1;
566 static int split_count_order = -1;
567
568 #if defined(HAVE_SYSCONF)
569 static void ht_init_nr_cpus_mask(void)
570 {
571 long maxcpus;
572
573 maxcpus = sysconf(_SC_NPROCESSORS_CONF);
574 if (maxcpus <= 0) {
575 nr_cpus_mask = -2;
576 return;
577 }
578 /*
579 * round up number of CPUs to next power of two, so we
580 * can use & for modulo.
581 */
582 maxcpus = 1UL << cds_lfht_get_count_order_ulong(maxcpus);
583 nr_cpus_mask = maxcpus - 1;
584 }
585 #else /* #if defined(HAVE_SYSCONF) */
586 static void ht_init_nr_cpus_mask(void)
587 {
588 nr_cpus_mask = -2;
589 }
590 #endif /* #else #if defined(HAVE_SYSCONF) */
591
592 static
593 void alloc_split_items_count(struct cds_lfht *ht)
594 {
595 if (nr_cpus_mask == -1) {
596 ht_init_nr_cpus_mask();
597 if (nr_cpus_mask < 0)
598 split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
599 else
600 split_count_mask = nr_cpus_mask;
601 split_count_order =
602 cds_lfht_get_count_order_ulong(split_count_mask + 1);
603 }
604
605 assert(split_count_mask >= 0);
606
607 if (ht->flags & CDS_LFHT_ACCOUNTING) {
608 ht->split_count = calloc(split_count_mask + 1,
609 sizeof(struct ht_items_count));
610 assert(ht->split_count);
611 } else {
612 ht->split_count = NULL;
613 }
614 }
615
616 static
617 void free_split_items_count(struct cds_lfht *ht)
618 {
619 poison_free(ht->split_count);
620 }
621
622 #if defined(HAVE_SCHED_GETCPU)
623 static
624 int ht_get_split_count_index(unsigned long hash)
625 {
626 int cpu;
627
628 assert(split_count_mask >= 0);
629 cpu = sched_getcpu();
630 if (caa_unlikely(cpu < 0))
631 return hash & split_count_mask;
632 else
633 return cpu & split_count_mask;
634 }
635 #else /* #if defined(HAVE_SCHED_GETCPU) */
636 static
637 int ht_get_split_count_index(unsigned long hash)
638 {
639 return hash & split_count_mask;
640 }
641 #endif /* #else #if defined(HAVE_SCHED_GETCPU) */
642
643 static
644 void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
645 {
646 unsigned long split_count;
647 int index;
648 long count;
649
650 if (caa_unlikely(!ht->split_count))
651 return;
652 index = ht_get_split_count_index(hash);
653 split_count = uatomic_add_return(&ht->split_count[index].add, 1);
654 if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
655 return;
 656 	/* Only if the number of adds is a multiple of 1UL << COUNT_COMMIT_ORDER */
657
658 dbg_printf("add split count %lu\n", split_count);
659 count = uatomic_add_return(&ht->count,
660 1UL << COUNT_COMMIT_ORDER);
661 if (caa_likely(count & (count - 1)))
662 return;
663 /* Only if global count is power of 2 */
664
665 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
666 return;
667 dbg_printf("add set global %ld\n", count);
668 cds_lfht_resize_lazy_count(ht, size,
669 count >> (CHAIN_LEN_TARGET - 1));
670 }
671
672 static
673 void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
674 {
675 unsigned long split_count;
676 int index;
677 long count;
678
679 if (caa_unlikely(!ht->split_count))
680 return;
681 index = ht_get_split_count_index(hash);
682 split_count = uatomic_add_return(&ht->split_count[index].del, 1);
683 if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
684 return;
 685 	/* Only if the number of deletes is a multiple of 1UL << COUNT_COMMIT_ORDER */
686
687 dbg_printf("del split count %lu\n", split_count);
688 count = uatomic_add_return(&ht->count,
689 -(1UL << COUNT_COMMIT_ORDER));
690 if (caa_likely(count & (count - 1)))
691 return;
692 /* Only if global count is power of 2 */
693
694 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
695 return;
696 dbg_printf("del set global %ld\n", count);
697 /*
698 * Don't shrink table if the number of nodes is below a
699 * certain threshold.
700 */
701 if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
702 return;
703 cds_lfht_resize_lazy_count(ht, size,
704 count >> (CHAIN_LEN_TARGET - 1));
705 }
706
707 static
708 void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
709 {
710 unsigned long count;
711
712 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
713 return;
714 count = uatomic_read(&ht->count);
715 /*
716 * Use bucket-local length for small table expand and for
717 * environments lacking per-cpu data support.
718 */
719 if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
720 return;
721 if (chain_len > 100)
722 dbg_printf("WARNING: large chain length: %u.\n",
723 chain_len);
724 if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) {
725 int growth;
726
727 /*
728 * Ideal growth calculated based on chain length.
729 */
730 growth = cds_lfht_get_count_order_u32(chain_len
731 - (CHAIN_LEN_TARGET - 1));
732 if ((ht->flags & CDS_LFHT_ACCOUNTING)
733 && (size << growth)
734 >= (1UL << (COUNT_COMMIT_ORDER
735 + split_count_order))) {
736 /*
737 * If ideal growth expands the hash table size
738 * beyond the "small hash table" sizes, use the
739 * maximum small hash table size to attempt
740 * expanding the hash table. This only applies
741 * when node accounting is available, otherwise
742 * the chain length is used to expand the hash
743 * table in every case.
744 */
745 growth = COUNT_COMMIT_ORDER + split_count_order
746 - cds_lfht_get_count_order_ulong(size);
747 if (growth <= 0)
748 return;
749 }
750 cds_lfht_resize_lazy_grow(ht, size, growth);
751 }
752 }
753
754 static
755 struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
756 {
757 return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
758 }
759
760 static
761 int is_removed(struct cds_lfht_node *node)
762 {
763 return ((unsigned long) node) & REMOVED_FLAG;
764 }
765
766 static
767 int is_bucket(struct cds_lfht_node *node)
768 {
769 return ((unsigned long) node) & BUCKET_FLAG;
770 }
771
772 static
773 struct cds_lfht_node *flag_bucket(struct cds_lfht_node *node)
774 {
775 return (struct cds_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
776 }
777
778 static
779 int is_removal_owner(struct cds_lfht_node *node)
780 {
781 return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
782 }
783
784 static
785 struct cds_lfht_node *flag_removal_owner(struct cds_lfht_node *node)
786 {
787 return (struct cds_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
788 }
789
790 static
791 struct cds_lfht_node *flag_removed_or_removal_owner(struct cds_lfht_node *node)
792 {
793 return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG | REMOVAL_OWNER_FLAG);
794 }
795
796 static
797 struct cds_lfht_node *get_end(void)
798 {
799 return (struct cds_lfht_node *) END_VALUE;
800 }
801
802 static
803 int is_end(struct cds_lfht_node *node)
804 {
805 return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
806 }
807
808 static
809 unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
810 unsigned long v)
811 {
812 unsigned long old1, old2;
813
814 old1 = uatomic_read(ptr);
815 do {
816 old2 = old1;
817 if (old2 >= v)
818 return old2;
819 } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
820 return old2;
821 }
822
823 static
824 void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
825 {
826 return ht->mm->alloc_bucket_table(ht, order);
827 }
828
829 /*
830 * cds_lfht_free_bucket_table() should be called with decreasing order.
831 * When cds_lfht_free_bucket_table(0) is called, it means the whole
832 * lfht is destroyed.
833 */
834 static
835 void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
836 {
837 return ht->mm->free_bucket_table(ht, order);
838 }
839
840 static inline
841 struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
842 {
843 return ht->bucket_at(ht, index);
844 }
845
846 static inline
847 struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
848 unsigned long hash)
849 {
850 assert(size > 0);
851 return bucket_at(ht, hash & (size - 1));
852 }
853
854 /*
855 * Remove all logically deleted nodes from a bucket up to a certain node key.
856 */
857 static
858 void _cds_lfht_gc_bucket(struct cds_lfht_node *bucket, struct cds_lfht_node *node)
859 {
860 struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
861
862 assert(!is_bucket(bucket));
863 assert(!is_removed(bucket));
864 assert(!is_removal_owner(bucket));
865 assert(!is_bucket(node));
866 assert(!is_removed(node));
867 assert(!is_removal_owner(node));
868 for (;;) {
869 iter_prev = bucket;
870 /* We can always skip the bucket node initially */
871 iter = rcu_dereference(iter_prev->next);
872 assert(!is_removed(iter));
873 assert(!is_removal_owner(iter));
874 assert(iter_prev->reverse_hash <= node->reverse_hash);
875 /*
876 * We should never be called with bucket (start of chain)
877 * and logically removed node (end of path compression
 878 		 * marker) actually being the same node. This would be a
879 * bug in the algorithm implementation.
880 */
881 assert(bucket != node);
882 for (;;) {
883 if (caa_unlikely(is_end(iter)))
884 return;
885 if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
886 return;
887 next = rcu_dereference(clear_flag(iter)->next);
888 if (caa_likely(is_removed(next)))
889 break;
890 iter_prev = clear_flag(iter);
891 iter = next;
892 }
893 assert(!is_removed(iter));
894 assert(!is_removal_owner(iter));
895 if (is_bucket(iter))
896 new_next = flag_bucket(clear_flag(next));
897 else
898 new_next = clear_flag(next);
899 (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
900 }
901 }
902
903 static
904 int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
905 struct cds_lfht_node *old_node,
906 struct cds_lfht_node *old_next,
907 struct cds_lfht_node *new_node)
908 {
909 struct cds_lfht_node *bucket, *ret_next;
910
911 if (!old_node) /* Return -ENOENT if asked to replace NULL node */
912 return -ENOENT;
913
914 assert(!is_removed(old_node));
915 assert(!is_removal_owner(old_node));
916 assert(!is_bucket(old_node));
917 assert(!is_removed(new_node));
918 assert(!is_removal_owner(new_node));
919 assert(!is_bucket(new_node));
920 assert(new_node != old_node);
921 for (;;) {
922 /* Insert after node to be replaced */
923 if (is_removed(old_next)) {
924 /*
925 * Too late, the old node has been removed under us
926 * between lookup and replace. Fail.
927 */
928 return -ENOENT;
929 }
930 assert(old_next == clear_flag(old_next));
931 assert(new_node != old_next);
932 /*
933 * REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
934 * flag. It is either set atomically at the same time
935 * (replace) or after (del).
936 */
937 assert(!is_removal_owner(old_next));
938 new_node->next = old_next;
939 /*
940 * Here is the whole trick for lock-free replace: we add
941 * the replacement node _after_ the node we want to
942 * replace by atomically setting its next pointer at the
943 * same time we set its removal flag. Given that
944 * the lookups/get next use an iterator aware of the
945 * next pointer, they will either skip the old node due
946 * to the removal flag and see the new node, or use
947 * the old node, but will not see the new one.
948 * This is a replacement of a node with another node
949 * that has the same value: we are therefore not
950 * removing a value from the hash table. We set both the
951 * REMOVED and REMOVAL_OWNER flags atomically so we own
952 * the node after successful cmpxchg.
953 */
954 ret_next = uatomic_cmpxchg(&old_node->next,
955 old_next, flag_removed_or_removal_owner(new_node));
956 if (ret_next == old_next)
957 break; /* We performed the replacement. */
958 old_next = ret_next;
959 }
960
961 /*
962 * Ensure that the old node is not visible to readers anymore:
963 * lookup for the node, and remove it (along with any other
964 * logically removed node) if found.
965 */
966 bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
967 _cds_lfht_gc_bucket(bucket, new_node);
968
969 assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
970 return 0;
971 }
972
973 /*
974 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
975 * mode. A NULL unique_ret allows creation of duplicate keys.
976 */
977 static
978 void _cds_lfht_add(struct cds_lfht *ht,
979 unsigned long hash,
980 cds_lfht_match_fct match,
981 const void *key,
982 unsigned long size,
983 struct cds_lfht_node *node,
984 struct cds_lfht_iter *unique_ret,
985 int bucket_flag)
986 {
987 struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
988 *return_node;
989 struct cds_lfht_node *bucket;
990
991 assert(!is_bucket(node));
992 assert(!is_removed(node));
993 assert(!is_removal_owner(node));
994 bucket = lookup_bucket(ht, size, hash);
995 for (;;) {
996 uint32_t chain_len = 0;
997
998 /*
999 * iter_prev points to the non-removed node prior to the
1000 * insert location.
1001 */
1002 iter_prev = bucket;
1003 /* We can always skip the bucket node initially */
1004 iter = rcu_dereference(iter_prev->next);
1005 assert(iter_prev->reverse_hash <= node->reverse_hash);
1006 for (;;) {
1007 if (caa_unlikely(is_end(iter)))
1008 goto insert;
1009 if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
1010 goto insert;
1011
1012 /* bucket node is the first node of the identical-hash-value chain */
1013 if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
1014 goto insert;
1015
1016 next = rcu_dereference(clear_flag(iter)->next);
1017 if (caa_unlikely(is_removed(next)))
1018 goto gc_node;
1019
1020 /* uniquely add */
1021 if (unique_ret
1022 && !is_bucket(next)
1023 && clear_flag(iter)->reverse_hash == node->reverse_hash) {
1024 struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
1025
1026 /*
1027 * uniquely adding inserts the node as the first
1028 * node of the identical-hash-value node chain.
1029 *
 1030 				 * This semantic ensures that no duplicate keys
 1031 				 * can ever be observable in the table
 1032 				 * (including when traversing the table node by
 1033 				 * node with forward iterations).
1034 */
1035 cds_lfht_next_duplicate(ht, match, key, &d_iter);
1036 if (!d_iter.node)
1037 goto insert;
1038
1039 *unique_ret = d_iter;
1040 return;
1041 }
1042
1043 /* Only account for identical reverse hash once */
1044 if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash
1045 && !is_bucket(next))
1046 check_resize(ht, size, ++chain_len);
1047 iter_prev = clear_flag(iter);
1048 iter = next;
1049 }
1050
1051 insert:
1052 assert(node != clear_flag(iter));
1053 assert(!is_removed(iter_prev));
1054 assert(!is_removal_owner(iter_prev));
1055 assert(!is_removed(iter));
1056 assert(!is_removal_owner(iter));
1057 assert(iter_prev != node);
1058 if (!bucket_flag)
1059 node->next = clear_flag(iter);
1060 else
1061 node->next = flag_bucket(clear_flag(iter));
1062 if (is_bucket(iter))
1063 new_node = flag_bucket(node);
1064 else
1065 new_node = node;
1066 if (uatomic_cmpxchg(&iter_prev->next, iter,
1067 new_node) != iter) {
1068 continue; /* retry */
1069 } else {
1070 return_node = node;
1071 goto end;
1072 }
1073
1074 gc_node:
1075 assert(!is_removed(iter));
1076 assert(!is_removal_owner(iter));
1077 if (is_bucket(iter))
1078 new_next = flag_bucket(clear_flag(next));
1079 else
1080 new_next = clear_flag(next);
1081 (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
1082 /* retry */
1083 }
1084 end:
1085 if (unique_ret) {
1086 unique_ret->node = return_node;
1087 /* unique_ret->next left unset, never used. */
1088 }
1089 }
1090
1091 static
1092 int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
1093 struct cds_lfht_node *node)
1094 {
1095 struct cds_lfht_node *bucket, *next;
1096
1097 if (!node) /* Return -ENOENT if asked to delete NULL node */
1098 return -ENOENT;
1099
1100 /* logically delete the node */
1101 assert(!is_bucket(node));
1102 assert(!is_removed(node));
1103 assert(!is_removal_owner(node));
1104
1105 /*
1106 * We are first checking if the node had previously been
1107 * logically removed (this check is not atomic with setting the
1108 * logical removal flag). Return -ENOENT if the node had
1109 * previously been removed.
1110 */
1111 next = CMM_LOAD_SHARED(node->next); /* next is not dereferenced */
1112 if (caa_unlikely(is_removed(next)))
1113 return -ENOENT;
1114 assert(!is_bucket(next));
1115 /*
1116 * The del operation semantic guarantees a full memory barrier
1117 * before the uatomic_or atomic commit of the deletion flag.
1118 */
1119 cmm_smp_mb__before_uatomic_or();
1120 /*
1121 * We set the REMOVED_FLAG unconditionally. Note that there may
1122 * be more than one concurrent thread setting this flag.
 1123 	 * Which thread wins the race will be known after the garbage
1124 * collection phase, stay tuned!
1125 */
1126 uatomic_or(&node->next, REMOVED_FLAG);
1127 /* We performed the (logical) deletion. */
1128
1129 /*
1130 * Ensure that the node is not visible to readers anymore: lookup for
1131 * the node, and remove it (along with any other logically removed node)
1132 * if found.
1133 */
1134 bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
1135 _cds_lfht_gc_bucket(bucket, node);
1136
1137 assert(is_removed(CMM_LOAD_SHARED(node->next)));
1138 /*
1139 * Last phase: atomically exchange node->next with a version
1140 * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
1141 * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
1142 * the node and win the removal race.
1143 * It is interesting to note that all "add" paths are forbidden
1144 * to change the next pointer starting from the point where the
 1145 	 * REMOVED_FLAG is set, so here a read followed by an
 1146 	 * xchg() suffices to guarantee that the xchg() will only ever
1147 * set the "REMOVAL_OWNER_FLAG" (or change nothing if the flag
1148 * was already set).
1149 */
1150 if (!is_removal_owner(uatomic_xchg(&node->next,
1151 flag_removal_owner(node->next))))
1152 return 0;
1153 else
1154 return -ENOENT;
1155 }
1156
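/*
 * Illustrative caller-side sketch (not from the original source, kept
 * out of compilation with #if 0): the winner of the removal race (the
 * thread for which cds_lfht_del() returns 0) owns the node, but must
 * still wait for a grace period before reclaiming its memory, e.g.
 * through call_rcu(). The struct mynode type is hypothetical user-side
 * code and is assumed to embed both a struct cds_lfht_node node and a
 * struct rcu_head rcu_head field.
 */
#if 0
static void free_mynode_rcu(struct rcu_head *head)
{
	struct mynode *n = caa_container_of(head, struct mynode, rcu_head);

	free(n);
}

static int remove_mynode(struct cds_lfht *ht, struct mynode *n)
{
	int ret;

	rcu_read_lock();
	ret = cds_lfht_del(ht, &n->node);
	rcu_read_unlock();
	if (!ret)	/* we won the removal race: reclaim after a G.P. */
		call_rcu(&n->rcu_head, free_mynode_rcu);
	return ret;
}
#endif
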
1157 static
1158 void *partition_resize_thread(void *arg)
1159 {
1160 struct partition_resize_work *work = arg;
1161
1162 work->ht->flavor->register_thread();
1163 work->fct(work->ht, work->i, work->start, work->len);
1164 work->ht->flavor->unregister_thread();
1165 return NULL;
1166 }
1167
1168 static
1169 void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
1170 unsigned long len,
1171 void (*fct)(struct cds_lfht *ht, unsigned long i,
1172 unsigned long start, unsigned long len))
1173 {
1174 unsigned long partition_len, start = 0;
1175 struct partition_resize_work *work;
1176 int thread, ret;
1177 unsigned long nr_threads;
1178
1179 assert(nr_cpus_mask != -1);
1180 if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
1181 goto fallback;
1182
1183 /*
 1184 	 * Note: nr_cpus_mask + 1 is always a power of 2.
1185 * We spawn just the number of threads we need to satisfy the minimum
1186 * partition size, up to the number of CPUs in the system.
1187 */
1188 if (nr_cpus_mask > 0) {
1189 nr_threads = min(nr_cpus_mask + 1,
1190 len >> MIN_PARTITION_PER_THREAD_ORDER);
1191 } else {
1192 nr_threads = 1;
1193 }
1194 partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
1195 work = calloc(nr_threads, sizeof(*work));
1196 if (!work) {
1197 dbg_printf("error allocating for resize, single-threading\n");
1198 goto fallback;
1199 }
1200 for (thread = 0; thread < nr_threads; thread++) {
1201 work[thread].ht = ht;
1202 work[thread].i = i;
1203 work[thread].len = partition_len;
1204 work[thread].start = thread * partition_len;
1205 work[thread].fct = fct;
1206 ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
1207 partition_resize_thread, &work[thread]);
1208 if (ret == EAGAIN) {
1209 /*
1210 * Out of resources: wait and join the threads
1211 * we've created, then handle leftovers.
1212 */
1213 dbg_printf("error spawning for resize, single-threading\n");
1214 start = work[thread].start;
1215 len -= start;
1216 nr_threads = thread;
1217 break;
1218 }
1219 assert(!ret);
1220 }
1221 for (thread = 0; thread < nr_threads; thread++) {
1222 ret = pthread_join(work[thread].thread_id, NULL);
1223 assert(!ret);
1224 }
1225 free(work);
1226
1227 /*
 1228 	 * A pthread_create failure above either leaves us with no
 1229 	 * threads to join or with a non-zero start offset; in that case,
 1230 	 * fall back to single-threaded processing of the leftovers.
1231 */
1232 if (start == 0 && nr_threads > 0)
1233 return;
1234 fallback:
1235 ht->flavor->thread_online();
1236 fct(ht, i, start, len);
1237 ht->flavor->thread_offline();
1238 }
1239
1240 /*
1241 * Holding RCU read lock to protect _cds_lfht_add against memory
1242 * reclaim that could be performed by other call_rcu worker threads (ABA
1243 * problem).
1244 *
1245 * When we reach a certain length, we can split this population phase over
1246 * many worker threads, based on the number of CPUs available in the system.
 1247  * This should therefore keep the expand from lagging too far behind
 1248  * concurrent insertion threads, by relying on the scheduler's ability to
 1249  * schedule bucket node population fairly with insertions.
1250 */
1251 static
1252 void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
1253 unsigned long start, unsigned long len)
1254 {
1255 unsigned long j, size = 1UL << (i - 1);
1256
1257 assert(i > MIN_TABLE_ORDER);
1258 ht->flavor->read_lock();
1259 for (j = size + start; j < size + start + len; j++) {
1260 struct cds_lfht_node *new_node = bucket_at(ht, j);
1261
1262 assert(j >= size && j < (size << 1));
1263 dbg_printf("init populate: order %lu index %lu hash %lu\n",
1264 i, j, j);
1265 new_node->reverse_hash = bit_reverse_ulong(j);
1266 _cds_lfht_add(ht, j, NULL, NULL, size, new_node, NULL, 1);
1267 }
1268 ht->flavor->read_unlock();
1269 }
1270
1271 static
1272 void init_table_populate(struct cds_lfht *ht, unsigned long i,
1273 unsigned long len)
1274 {
1275 partition_resize_helper(ht, i, len, init_table_populate_partition);
1276 }
1277
1278 static
1279 void init_table(struct cds_lfht *ht,
1280 unsigned long first_order, unsigned long last_order)
1281 {
1282 unsigned long i;
1283
1284 dbg_printf("init table: first_order %lu last_order %lu\n",
1285 first_order, last_order);
1286 assert(first_order > MIN_TABLE_ORDER);
1287 for (i = first_order; i <= last_order; i++) {
1288 unsigned long len;
1289
1290 len = 1UL << (i - 1);
1291 dbg_printf("init order %lu len: %lu\n", i, len);
1292
1293 /* Stop expand if the resize target changes under us */
1294 if (CMM_LOAD_SHARED(ht->resize_target) < (1UL << i))
1295 break;
1296
1297 cds_lfht_alloc_bucket_table(ht, i);
1298
1299 /*
1300 * Set all bucket nodes reverse hash values for a level and
1301 * link all bucket nodes into the table.
1302 */
1303 init_table_populate(ht, i, len);
1304
1305 /*
1306 * Update table size.
1307 */
1308 cmm_smp_wmb(); /* populate data before RCU size */
1309 CMM_STORE_SHARED(ht->size, 1UL << i);
1310
1311 dbg_printf("init new size: %lu\n", 1UL << i);
1312 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1313 break;
1314 }
1315 }
1316
1317 /*
1318 * Holding RCU read lock to protect _cds_lfht_remove against memory
1319 * reclaim that could be performed by other call_rcu worker threads (ABA
1320 * problem).
1321 * For a single level, we logically remove and garbage collect each node.
1322 *
1323 * As a design choice, we perform logical removal and garbage collection on a
 1324  * node-per-node basis to simplify this algorithm. We also assume that keeping
 1325  * good cache locality for the operation outweighs the possible performance gain
1326 * that could be achieved by batching garbage collection for multiple levels.
1327 * However, this would have to be justified by benchmarks.
1328 *
1329 * Concurrent removal and add operations are helping us perform garbage
1330 * collection of logically removed nodes. We guarantee that all logically
1331 * removed nodes have been garbage-collected (unlinked) before call_rcu is
 1332  * invoked to free a whole level of bucket nodes (after a grace period).
1333 *
1334 * Logical removal and garbage collection can therefore be done in batch
1335 * or on a node-per-node basis, as long as the guarantee above holds.
1336 *
1337 * When we reach a certain length, we can split this removal over many worker
1338 * threads, based on the number of CPUs available in the system. This should
 1339  * take care of not letting the resize process lag too far behind concurrent
1340 * updater threads actively inserting into the hash table.
1341 */
1342 static
1343 void remove_table_partition(struct cds_lfht *ht, unsigned long i,
1344 unsigned long start, unsigned long len)
1345 {
1346 unsigned long j, size = 1UL << (i - 1);
1347
1348 assert(i > MIN_TABLE_ORDER);
1349 ht->flavor->read_lock();
1350 for (j = size + start; j < size + start + len; j++) {
1351 struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
1352 struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);
1353
1354 assert(j >= size && j < (size << 1));
1355 dbg_printf("remove entry: order %lu index %lu hash %lu\n",
1356 i, j, j);
1357 /* Set the REMOVED_FLAG to freeze the ->next for gc */
1358 uatomic_or(&fini_bucket->next, REMOVED_FLAG);
1359 _cds_lfht_gc_bucket(parent_bucket, fini_bucket);
1360 }
1361 ht->flavor->read_unlock();
1362 }
1363
1364 static
1365 void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
1366 {
1367 partition_resize_helper(ht, i, len, remove_table_partition);
1368 }
1369
1370 /*
1371 * fini_table() is never called for first_order == 0, which is why
 1372  * free_by_rcu_order == 0 can be used as a criterion to know whether
 1373  * free must be called.
1374 */
1375 static
1376 void fini_table(struct cds_lfht *ht,
1377 unsigned long first_order, unsigned long last_order)
1378 {
1379 long i;
1380 unsigned long free_by_rcu_order = 0;
1381
1382 dbg_printf("fini table: first_order %lu last_order %lu\n",
1383 first_order, last_order);
1384 assert(first_order > MIN_TABLE_ORDER);
1385 for (i = last_order; i >= first_order; i--) {
1386 unsigned long len;
1387
1388 len = 1UL << (i - 1);
1389 dbg_printf("fini order %lu len: %lu\n", i, len);
1390
1391 /* Stop shrink if the resize target changes under us */
1392 if (CMM_LOAD_SHARED(ht->resize_target) > (1UL << (i - 1)))
1393 break;
1394
1395 cmm_smp_wmb(); /* populate data before RCU size */
1396 CMM_STORE_SHARED(ht->size, 1UL << (i - 1));
1397
1398 /*
1399 * We need to wait for all add operations to reach Q.S. (and
1400 * thus use the new table for lookups) before we can start
 1401 		 * releasing the old bucket nodes. Otherwise, their lookups could
 1402 		 * return a logically removed node as the insert position.
1403 */
1404 ht->flavor->update_synchronize_rcu();
1405 if (free_by_rcu_order)
1406 cds_lfht_free_bucket_table(ht, free_by_rcu_order);
1407
1408 /*
1409 * Set "removed" flag in bucket nodes about to be removed.
1410 * Unlink all now-logically-removed bucket node pointers.
 1411 		 * Concurrent add/remove operations are helping us do
1412 * the gc.
1413 */
1414 remove_table(ht, i, len);
1415
1416 free_by_rcu_order = i;
1417
1418 dbg_printf("fini new size: %lu\n", 1UL << i);
1419 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1420 break;
1421 }
1422
1423 if (free_by_rcu_order) {
1424 ht->flavor->update_synchronize_rcu();
1425 cds_lfht_free_bucket_table(ht, free_by_rcu_order);
1426 }
1427 }
1428
1429 static
1430 void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
1431 {
1432 struct cds_lfht_node *prev, *node;
1433 unsigned long order, len, i;
1434
1435 cds_lfht_alloc_bucket_table(ht, 0);
1436
1437 dbg_printf("create bucket: order 0 index 0 hash 0\n");
1438 node = bucket_at(ht, 0);
1439 node->next = flag_bucket(get_end());
1440 node->reverse_hash = 0;
1441
1442 for (order = 1; order < cds_lfht_get_count_order_ulong(size) + 1; order++) {
1443 len = 1UL << (order - 1);
1444 cds_lfht_alloc_bucket_table(ht, order);
1445
1446 for (i = 0; i < len; i++) {
1447 /*
 1448 			 * We are initializing the bucket node with
 1449 			 * hash=(len+i) (which is also the bucket with
 1450 			 * index=(len+i)) and inserting it into the hash
 1451 			 * table, so this node has to be inserted after the
 1452 			 * bucket with index=(len+i)&(len-1)=i. Because no
 1453 			 * other non-bucket node, nor any bucket node with a
 1454 			 * larger index/hash, has been inserted yet, the new
 1455 			 * bucket node can simply be linked directly after
 1456 			 * the bucket node with index=i.
1457 */
1458 prev = bucket_at(ht, i);
1459 node = bucket_at(ht, len + i);
1460
1461 dbg_printf("create bucket: order %lu index %lu hash %lu\n",
1462 order, len + i, len + i);
1463 node->reverse_hash = bit_reverse_ulong(len + i);
1464
1465 /* insert after prev */
1466 assert(is_bucket(prev->next));
1467 node->next = prev->next;
1468 prev->next = flag_bucket(node);
1469 }
1470 }
1471 }
1472
1473 struct cds_lfht *_cds_lfht_new(unsigned long init_size,
1474 unsigned long min_nr_alloc_buckets,
1475 unsigned long max_nr_buckets,
1476 int flags,
1477 const struct cds_lfht_mm_type *mm,
1478 const struct rcu_flavor_struct *flavor,
1479 pthread_attr_t *attr)
1480 {
1481 struct cds_lfht *ht;
1482 unsigned long order;
1483
1484 /* min_nr_alloc_buckets must be power of two */
1485 if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
1486 return NULL;
1487
1488 /* init_size must be power of two */
1489 if (!init_size || (init_size & (init_size - 1)))
1490 return NULL;
1491
1492 /*
1493 * Memory management plugin default.
1494 */
1495 if (!mm) {
1496 if (CAA_BITS_PER_LONG > 32
1497 && max_nr_buckets
1498 && max_nr_buckets <= (1ULL << 32)) {
1499 /*
1500 * For 64-bit architectures, with max number of
1501 * buckets small enough not to use the entire
1502 * 64-bit memory mapping space (and allowing a
1503 * fair number of hash table instances), use the
1504 * mmap allocator, which is faster than the
1505 * order allocator.
1506 */
1507 mm = &cds_lfht_mm_mmap;
1508 } else {
1509 /*
1510 * The fallback is to use the order allocator.
1511 */
1512 mm = &cds_lfht_mm_order;
1513 }
1514 }
1515
1516 /* max_nr_buckets == 0 for order based mm means infinite */
1517 if (mm == &cds_lfht_mm_order && !max_nr_buckets)
1518 max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);
1519
1520 /* max_nr_buckets must be power of two */
1521 if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
1522 return NULL;
1523
1524 min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
1525 init_size = max(init_size, MIN_TABLE_SIZE);
1526 max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
1527 init_size = min(init_size, max_nr_buckets);
1528
1529 ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets);
1530 assert(ht);
1531 assert(ht->mm == mm);
1532 assert(ht->bucket_at == mm->bucket_at);
1533
1534 ht->flags = flags;
1535 ht->flavor = flavor;
1536 ht->resize_attr = attr;
1537 alloc_split_items_count(ht);
1538 /* this mutex should not nest in read-side C.S. */
1539 pthread_mutex_init(&ht->resize_mutex, NULL);
1540 order = cds_lfht_get_count_order_ulong(init_size);
1541 ht->resize_target = 1UL << order;
1542 cds_lfht_create_bucket(ht, 1UL << order);
1543 ht->size = 1UL << order;
1544 return ht;
1545 }
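
/*
 * Illustrative creation sketch (user-side, not from the original
 * source): applications normally go through the cds_lfht_new() wrapper
 * declared in urcu/rculfhash.h, which ends up calling _cds_lfht_new()
 * with the default memory management plugin and the RCU flavor selected
 * at compile time. A typical auto-resizing table with accounting
 * enabled could be created as follows (the argument values are only an
 * example):
 *
 *	ht = cds_lfht_new(1, 1, 0,
 *		CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING,
 *		NULL);
 */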
1546
1547 void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
1548 cds_lfht_match_fct match, const void *key,
1549 struct cds_lfht_iter *iter)
1550 {
1551 struct cds_lfht_node *node, *next, *bucket;
1552 unsigned long reverse_hash, size;
1553
1554 reverse_hash = bit_reverse_ulong(hash);
1555
1556 size = rcu_dereference(ht->size);
1557 bucket = lookup_bucket(ht, size, hash);
1558 /* We can always skip the bucket node initially */
1559 node = rcu_dereference(bucket->next);
1560 node = clear_flag(node);
1561 for (;;) {
1562 if (caa_unlikely(is_end(node))) {
1563 node = next = NULL;
1564 break;
1565 }
1566 if (caa_unlikely(node->reverse_hash > reverse_hash)) {
1567 node = next = NULL;
1568 break;
1569 }
1570 next = rcu_dereference(node->next);
1571 assert(node == clear_flag(node));
1572 if (caa_likely(!is_removed(next))
1573 && !is_bucket(next)
1574 && node->reverse_hash == reverse_hash
1575 && caa_likely(match(node, key))) {
1576 break;
1577 }
1578 node = clear_flag(next);
1579 }
1580 assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
1581 iter->node = node;
1582 iter->next = next;
1583 }
1584
1585 void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
1586 const void *key, struct cds_lfht_iter *iter)
1587 {
1588 struct cds_lfht_node *node, *next;
1589 unsigned long reverse_hash;
1590
1591 node = iter->node;
1592 reverse_hash = node->reverse_hash;
1593 next = iter->next;
1594 node = clear_flag(next);
1595
1596 for (;;) {
1597 if (caa_unlikely(is_end(node))) {
1598 node = next = NULL;
1599 break;
1600 }
1601 if (caa_unlikely(node->reverse_hash > reverse_hash)) {
1602 node = next = NULL;
1603 break;
1604 }
1605 next = rcu_dereference(node->next);
1606 if (caa_likely(!is_removed(next))
1607 && !is_bucket(next)
1608 && caa_likely(match(node, key))) {
1609 break;
1610 }
1611 node = clear_flag(next);
1612 }
1613 assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
1614 iter->node = node;
1615 iter->next = next;
1616 }
1617
1618 void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1619 {
1620 struct cds_lfht_node *node, *next;
1621
1622 node = clear_flag(iter->next);
1623 for (;;) {
1624 if (caa_unlikely(is_end(node))) {
1625 node = next = NULL;
1626 break;
1627 }
1628 next = rcu_dereference(node->next);
1629 if (caa_likely(!is_removed(next))
1630 && !is_bucket(next)) {
1631 break;
1632 }
1633 node = clear_flag(next);
1634 }
1635 assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
1636 iter->node = node;
1637 iter->next = next;
1638 }
1639
1640 void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1641 {
1642 /*
1643 * Get next after first bucket node. The first bucket node is the
1644 * first node of the linked list.
1645 */
1646 iter->next = bucket_at(ht, 0)->next;
1647 cds_lfht_next(ht, iter);
1648 }
1649
1650 void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
1651 struct cds_lfht_node *node)
1652 {
1653 unsigned long size;
1654
1655 node->reverse_hash = bit_reverse_ulong(hash);
1656 size = rcu_dereference(ht->size);
1657 _cds_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
1658 ht_count_add(ht, size, hash);
1659 }
1660
1661 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
1662 unsigned long hash,
1663 cds_lfht_match_fct match,
1664 const void *key,
1665 struct cds_lfht_node *node)
1666 {
1667 unsigned long size;
1668 struct cds_lfht_iter iter;
1669
1670 node->reverse_hash = bit_reverse_ulong(hash);
1671 size = rcu_dereference(ht->size);
1672 _cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
1673 if (iter.node == node)
1674 ht_count_add(ht, size, hash);
1675 return iter.node;
1676 }
1677
1678 struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
1679 unsigned long hash,
1680 cds_lfht_match_fct match,
1681 const void *key,
1682 struct cds_lfht_node *node)
1683 {
1684 unsigned long size;
1685 struct cds_lfht_iter iter;
1686
1687 node->reverse_hash = bit_reverse_ulong(hash);
1688 size = rcu_dereference(ht->size);
1689 for (;;) {
1690 _cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
1691 if (iter.node == node) {
1692 ht_count_add(ht, size, hash);
1693 return NULL;
1694 }
1695
1696 if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node))
1697 return iter.node;
1698 }
1699 }
1700
1701 int cds_lfht_replace(struct cds_lfht *ht,
1702 struct cds_lfht_iter *old_iter,
1703 unsigned long hash,
1704 cds_lfht_match_fct match,
1705 const void *key,
1706 struct cds_lfht_node *new_node)
1707 {
1708 unsigned long size;
1709
1710 new_node->reverse_hash = bit_reverse_ulong(hash);
1711 if (!old_iter->node)
1712 return -ENOENT;
1713 if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
1714 return -EINVAL;
1715 if (caa_unlikely(!match(old_iter->node, key)))
1716 return -EINVAL;
1717 size = rcu_dereference(ht->size);
1718 return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
1719 new_node);
1720 }
1721
1722 int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
1723 {
1724 unsigned long size;
1725 int ret;
1726
1727 size = rcu_dereference(ht->size);
1728 ret = _cds_lfht_del(ht, size, node);
1729 if (!ret) {
1730 unsigned long hash;
1731
1732 hash = bit_reverse_ulong(node->reverse_hash);
1733 ht_count_del(ht, size, hash);
1734 }
1735 return ret;
1736 }
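
/*
 * Usage sketch for cds_lfht_del(): look up a key, remove the node, and
 * defer freeing it until after a grace period. cds_lfht_del() returns
 * -ENOENT if another thread already logically removed the node, in which
 * case that thread owns the reclaim. Hypothetical names as in the sketches
 * above (free_mynode() is the call_rcu callback from the add_replace
 * sketch).
 *
 *	int key = 42;
 *	struct cds_lfht_iter iter;
 *	struct cds_lfht_node *ht_node;
 *
 *	rcu_read_lock();
 *	cds_lfht_lookup(ht, hash_int(key), match_int, &key, &iter);
 *	ht_node = cds_lfht_iter_get_node(&iter);
 *	if (ht_node && !cds_lfht_del(ht, ht_node)) {
 *		struct mynode *n = caa_container_of(ht_node, struct mynode, node);
 *
 *		call_rcu(&n->head, free_mynode);
 *	}
 *	rcu_read_unlock();
 */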
1737
1738 int cds_lfht_is_node_deleted(struct cds_lfht_node *node)
1739 {
1740 return is_removed(CMM_LOAD_SHARED(node->next));
1741 }
1742
1743 static
1744 int cds_lfht_delete_bucket(struct cds_lfht *ht)
1745 {
1746 struct cds_lfht_node *node;
1747 unsigned long order, i, size;
1748
1749 /* Check that the table is empty */
1750 node = bucket_at(ht, 0);
1751 do {
1752 node = clear_flag(node)->next;
1753 if (!is_bucket(node))
1754 return -EPERM;
1755 assert(!is_removed(node));
1756 assert(!is_removal_owner(node));
1757 } while (!is_end(node));
1758 /*
1759 * size accessed without rcu_dereference because hash table is
1760 * being destroyed.
1761 */
1762 size = ht->size;
1763 /* Internal sanity check: all nodes left should be buckets */
1764 for (i = 0; i < size; i++) {
1765 node = bucket_at(ht, i);
1766 dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
1767 i, i, bit_reverse_ulong(node->reverse_hash));
1768 assert(is_bucket(node->next));
1769 }
1770
1771 for (order = cds_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
1772 cds_lfht_free_bucket_table(ht, order);
1773
1774 return 0;
1775 }
1776
1777 /*
1778 * Should only be called when no more concurrent readers nor writers can
1779 * possibly access the table.
1780 */
1781 int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
1782 {
1783 int ret, was_online;
1784
1785 /* Wait for in-flight resize operations to complete */
1786 _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
1787 cmm_smp_mb(); /* Store destroy before load resize */
1788 was_online = ht->flavor->read_ongoing();
1789 if (was_online)
1790 ht->flavor->thread_offline();
1791 /* Calling with RCU read-side held is an error. */
1792 if (ht->flavor->read_ongoing()) {
1793 ret = -EINVAL;
1794 if (was_online)
1795 ht->flavor->thread_online();
1796 goto end;
1797 }
1798 while (uatomic_read(&ht->in_progress_resize))
1799 poll(NULL, 0, 100); /* wait for 100ms */
1800 if (was_online)
1801 ht->flavor->thread_online();
1802 ret = cds_lfht_delete_bucket(ht);
1803 if (ret)
1804 return ret;
1805 free_split_items_count(ht);
1806 if (attr)
1807 *attr = ht->resize_attr;
1808 poison_free(ht);
1809 end:
1810 return ret;
1811 }
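
/*
 * Usage sketch for cds_lfht_destroy(): tear down the table once no other
 * thread can access it anymore. All nodes must have been removed first,
 * otherwise -EPERM is returned and the table is left intact. Calling it
 * with the RCU read-side lock held is an error (-EINVAL).
 *
 *	int ret;
 *
 *	// ... remove all nodes and wait for pending call_rcu() callbacks ...
 *	ret = cds_lfht_destroy(ht, NULL);
 *	if (ret)
 *		fprintf(stderr, "hash table destroy failed: %d\n", ret);
 */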
1812
1813 void cds_lfht_count_nodes(struct cds_lfht *ht,
1814 long *approx_before,
1815 unsigned long *count,
1816 long *approx_after)
1817 {
1818 struct cds_lfht_node *node, *next;
1819 unsigned long nr_bucket = 0, nr_removed = 0;
1820
1821 *approx_before = 0;
1822 if (ht->split_count) {
1823 int i;
1824
1825 for (i = 0; i < split_count_mask + 1; i++) {
1826 *approx_before += uatomic_read(&ht->split_count[i].add);
1827 *approx_before -= uatomic_read(&ht->split_count[i].del);
1828 }
1829 }
1830
1831 *count = 0;
1832
1833 /* Count non-bucket nodes in the table */
1834 node = bucket_at(ht, 0);
1835 do {
1836 next = rcu_dereference(node->next);
1837 if (is_removed(next)) {
1838 if (!is_bucket(next))
1839 (nr_removed)++;
1840 else
1841 (nr_bucket)++;
1842 } else if (!is_bucket(next))
1843 (*count)++;
1844 else
1845 (nr_bucket)++;
1846 node = clear_flag(next);
1847 } while (!is_end(node));
1848 dbg_printf("number of logically removed nodes: %lu\n", nr_removed);
1849 dbg_printf("number of bucket nodes: %lu\n", nr_bucket);
1850 *approx_after = 0;
1851 if (ht->split_count) {
1852 int i;
1853
1854 for (i = 0; i < split_count_mask + 1; i++) {
1855 *approx_after += uatomic_read(&ht->split_count[i].add);
1856 *approx_after -= uatomic_read(&ht->split_count[i].del);
1857 }
1858 }
1859 }
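
/*
 * Usage sketch for cds_lfht_count_nodes(): "count" is a snapshot obtained
 * by walking the whole split-ordered list, while the before/after values
 * are cheap split-counter approximations sampled around the walk. Call it
 * from within an RCU read-side critical section.
 *
 *	long approx_before, approx_after;
 *	unsigned long count;
 *
 *	rcu_read_lock();
 *	cds_lfht_count_nodes(ht, &approx_before, &count, &approx_after);
 *	rcu_read_unlock();
 *	printf("nodes: ~%ld before, %lu walked, ~%ld after\n",
 *		approx_before, count, approx_after);
 */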
1860
1861 /* called with resize mutex held */
1862 static
1863 void _do_cds_lfht_grow(struct cds_lfht *ht,
1864 unsigned long old_size, unsigned long new_size)
1865 {
1866 unsigned long old_order, new_order;
1867
1868 old_order = cds_lfht_get_count_order_ulong(old_size);
1869 new_order = cds_lfht_get_count_order_ulong(new_size);
1870 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1871 old_size, old_order, new_size, new_order);
1872 assert(new_size > old_size);
1873 init_table(ht, old_order + 1, new_order);
1874 }
1875
1876 /* called with resize mutex held */
1877 static
1878 void _do_cds_lfht_shrink(struct cds_lfht *ht,
1879 unsigned long old_size, unsigned long new_size)
1880 {
1881 unsigned long old_order, new_order;
1882
1883 new_size = max(new_size, MIN_TABLE_SIZE);
1884 old_order = cds_lfht_get_count_order_ulong(old_size);
1885 new_order = cds_lfht_get_count_order_ulong(new_size);
1886 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1887 old_size, old_order, new_size, new_order);
1888 assert(new_size < old_size);
1889
1890 	/* Remove and unlink all bucket nodes beyond the new (smaller) size. */
1891 fini_table(ht, new_order + 1, old_order);
1892 }
1893
1894
1895 /* called with resize mutex held */
1896 static
1897 void _do_cds_lfht_resize(struct cds_lfht *ht)
1898 {
1899 unsigned long new_size, old_size;
1900
1901 /*
1902 * Resize table, re-do if the target size has changed under us.
1903 */
1904 do {
1905 assert(uatomic_read(&ht->in_progress_resize));
1906 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1907 break;
1908 ht->resize_initiated = 1;
1909 old_size = ht->size;
1910 new_size = CMM_LOAD_SHARED(ht->resize_target);
1911 if (old_size < new_size)
1912 _do_cds_lfht_grow(ht, old_size, new_size);
1913 else if (old_size > new_size)
1914 _do_cds_lfht_shrink(ht, old_size, new_size);
1915 ht->resize_initiated = 0;
1916 /* write resize_initiated before read resize_target */
1917 cmm_smp_mb();
1918 } while (ht->size != CMM_LOAD_SHARED(ht->resize_target));
1919 }
1920
1921 static
1922 unsigned long resize_target_grow(struct cds_lfht *ht, unsigned long new_size)
1923 {
1924 return _uatomic_xchg_monotonic_increase(&ht->resize_target, new_size);
1925 }
1926
1927 static
1928 void resize_target_update_count(struct cds_lfht *ht,
1929 unsigned long count)
1930 {
1931 count = max(count, MIN_TABLE_SIZE);
1932 count = min(count, ht->max_nr_buckets);
1933 uatomic_set(&ht->resize_target, count);
1934 }
1935
1936 void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
1937 {
1938 int was_online;
1939
1940 was_online = ht->flavor->read_ongoing();
1941 if (was_online)
1942 ht->flavor->thread_offline();
1943 /* Calling with RCU read-side held is an error. */
1944 if (ht->flavor->read_ongoing()) {
1945 static int print_once;
1946
1947 if (!CMM_LOAD_SHARED(print_once))
1948 fprintf(stderr, "[error] rculfhash: cds_lfht_resize "
1949 "called with RCU read-side lock held.\n");
1950 CMM_STORE_SHARED(print_once, 1);
1951 assert(0);
1952 goto end;
1953 }
1954 resize_target_update_count(ht, new_size);
1955 CMM_STORE_SHARED(ht->resize_initiated, 1);
1956 pthread_mutex_lock(&ht->resize_mutex);
1957 _do_cds_lfht_resize(ht);
1958 pthread_mutex_unlock(&ht->resize_mutex);
1959 end:
1960 if (was_online)
1961 ht->flavor->thread_online();
1962 }
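
/*
 * Usage sketch for cds_lfht_resize(): explicitly resize the table to about
 * new_size buckets (clamped to the table's minimum and maximum). The call
 * is synchronous, takes the resize mutex, and must not be issued from
 * within an RCU read-side critical section (the function reports an error
 * and asserts otherwise).
 *
 *	// Grow (or shrink) the table to roughly 4096 buckets.
 *	cds_lfht_resize(ht, 4096);
 */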
1963
1964 static
1965 void do_resize_cb(struct rcu_head *head)
1966 {
1967 struct rcu_resize_work *work =
1968 caa_container_of(head, struct rcu_resize_work, head);
1969 struct cds_lfht *ht = work->ht;
1970
1971 ht->flavor->thread_offline();
1972 pthread_mutex_lock(&ht->resize_mutex);
1973 _do_cds_lfht_resize(ht);
1974 pthread_mutex_unlock(&ht->resize_mutex);
1975 ht->flavor->thread_online();
1976 poison_free(work);
1977 cmm_smp_mb(); /* finish resize before decrement */
1978 uatomic_dec(&ht->in_progress_resize);
1979 }
1980
1981 static
1982 void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
1983 {
1984 struct rcu_resize_work *work;
1985
1986 /* Store resize_target before read resize_initiated */
1987 cmm_smp_mb();
1988 if (!CMM_LOAD_SHARED(ht->resize_initiated)) {
1989 uatomic_inc(&ht->in_progress_resize);
1990 cmm_smp_mb(); /* increment resize count before load destroy */
1991 if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
1992 uatomic_dec(&ht->in_progress_resize);
1993 return;
1994 }
1995 work = malloc(sizeof(*work));
1996 if (work == NULL) {
1997 dbg_printf("error allocating resize work, bailing out\n");
1998 uatomic_dec(&ht->in_progress_resize);
1999 return;
2000 }
2001 work->ht = ht;
2002 ht->flavor->update_call_rcu(&work->head, do_resize_cb);
2003 CMM_STORE_SHARED(ht->resize_initiated, 1);
2004 }
2005 }
2006
2007 static
2008 void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
2009 {
2010 unsigned long target_size = size << growth;
2011
2012 target_size = min(target_size, ht->max_nr_buckets);
2013 if (resize_target_grow(ht, target_size) >= target_size)
2014 return;
2015
2016 __cds_lfht_resize_lazy_launch(ht);
2017 }
2018
2019 /*
2020 * We favor grow operations over shrink. A shrink operation never occurs
2021 * if a grow operation is queued for lazy execution. A grow operation
2022 * cancels any pending shrink lazy execution.
2023 */
2024 static
2025 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
2026 unsigned long count)
2027 {
2028 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
2029 return;
2030 count = max(count, MIN_TABLE_SIZE);
2031 count = min(count, ht->max_nr_buckets);
2032 if (count == size)
2033 return; /* Already the right size, no resize needed */
2034 if (count > size) { /* lazy grow */
2035 if (resize_target_grow(ht, count) >= count)
2036 return;
2037 } else { /* lazy shrink */
2038 for (;;) {
2039 unsigned long s;
2040
2041 s = uatomic_cmpxchg(&ht->resize_target, size, count);
2042 if (s == size)
2043 break; /* no resize needed */
2044 if (s > size)
2045 				return;	/* growing is in progress, or was just completed */
2046 			if (s <= count)
2047 				return;	/* some other thread already did the shrink */
2048 size = s;
2049 }
2050 }
2051 __cds_lfht_resize_lazy_launch(ht);
2052 }