1 /*
2 * rculfhash.c
3 *
4 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
5 *
6 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24 /*
25 * Based on the following articles:
26 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
27 * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
28 * - Michael, M. M. High performance dynamic lock-free hash tables
29 * and list-based sets. In Proceedings of the fourteenth annual ACM
30 * symposium on Parallel algorithms and architectures, ACM Press,
31 * (2002), 73-82.
32 *
33 * Some specificities of this Lock-Free Resizable RCU Hash Table
34 * implementation:
35 *
36 * - RCU read-side critical section allows readers to perform hash
37 * table lookups, as well as traversals, and use the returned objects
38 * safely by allowing memory reclaim to take place only after a grace
39 * period.
40 * - Add and remove operations are lock-free, and do not need to
41 * allocate memory. They need to be executed within RCU read-side
42 * critical section to ensure the objects they read are valid and to
43 * deal with the cmpxchg ABA problem.
44 * - add and add_unique operations are supported. add_unique checks if
45 * the node key already exists in the hash table, and ensures that no
46 * duplicate key is added if the key is already present in the hash
47 * table.
48 * - The resize operation executes concurrently with
49 * add/add_unique/add_replace/remove/lookup/traversal.
50 * - Hash table nodes are contained within a split-ordered list. This
51 * list is ordered by incrementing reversed-bits-hash value.
52 * - An index of bucket nodes is kept. These bucket nodes are the hash
53 * table "buckets". These buckets are internal nodes that allow
54 * performing a fast hash lookup, similarly to a skip list. These
55 * buckets are chained together in the split-ordered list, which
56 * allows recursive expansion by inserting new buckets between the
57 * existing buckets. The split-ordered list thus allows adding new
58 * buckets between existing buckets as the table needs to grow.
59 * - The resize operation for small tables only allows expanding the
60 * hash table. It is triggered automatically by detecting long chains
61 * in the add operation.
62 * - The resize operation for larger tables (and available through an
63 * API) allows both expanding and shrinking the hash table.
64 * - Split-counters are used to keep track of the number of
65 * nodes within the hash table for automatic resize triggering.
66 * - Resize operation initiated by long chain detection is executed by a
67 * worker thread, which keeps lock-freedom of add and remove.
68 * - Resize operations are protected by a mutex.
69 * - The removal operation is split in two parts: first, a "removed"
70 * flag is set in the next pointer within the node to remove. Then,
71 * a "garbage collection" is performed in the bucket containing the
72 * removed node (from the start of the bucket up to the removed node).
73 * All encountered nodes with "removed" flag set in their next
74 * pointers are removed from the linked-list. If the cmpxchg used for
75 * removal fails (due to concurrent garbage-collection or concurrent
76 * add), we retry from the beginning of the bucket. This ensures that
77 * the node with "removed" flag set is removed from the hash table
78 * (not visible to lookups anymore) before the RCU read-side critical
79 * section held across removal ends. Furthermore, this ensures that
80 * the node with "removed" flag set is removed from the linked-list
81 * before its memory is reclaimed. After the "removed" flag is set,
82 * only the "del" operation that is first to set the "removal owner"
83 * flag (with an xchg) in the node's next pointer is considered to
84 * have succeeded in its removal (and thus owns the node to reclaim).
85 * Because we garbage-collect starting from an invariant node (the
86 * start-of-bucket bucket node) up to the "removed" node (or find a
87 * reverse-hash that is higher), we are sure that a successful
88 * traversal of the chain leads to a chain that is present in the
89 * linked-list (the start node is never removed) and that it does not
90 * contain the "removed" node anymore, even if concurrent delete/add
91 * operations are changing the structure of the list concurrently.
92 * - The add operations perform garbage collection of buckets if they
93 * encounter nodes with removed flag set in the bucket where they want
94 * to add their new node. This ensures lock-freedom of the add
95 * operation by helping the remover unlink nodes from the list rather
96 * than waiting for it to do so.
97 * - There are three memory backends for the hash table buckets: the
98 * "order table", the "chunks", and the "mmap".
99 * - These bucket containers contain a compact version of the hash table
100 * nodes.
101 * - The RCU "order table":
102 * - has a first level table indexed by log2(hash index) which is
103 * copied and expanded by the resize operation. This order table
104 * allows finding the "bucket node" tables.
105 * - There is one bucket node table per hash index order. The size of
106 * each bucket node table is half the number of hashes contained in
107 * this order (except for order 0).
108 * - The RCU "chunks" memory backend is best suited for close interaction
109 * with a page allocator. It uses a linear array as an index to "chunks",
110 * each containing the same number of buckets.
111 * - The RCU "mmap" memory backend uses a single memory map to hold
112 * all buckets.
113 * - synchronize_rcu is used to garbage-collect the old bucket node table.
114 *
115 * Ordering Guarantees:
116 *
117 * To discuss these guarantees, we first define a "read" operation as any
118 * of the basic cds_lfht_lookup, cds_lfht_next_duplicate,
119 * cds_lfht_first, cds_lfht_next operations, as well as
120 * cds_lfht_add_unique (failure).
121 *
122 * We define a "read traversal" operation as any of the following
123 * groups of operations:
124 * - cds_lfht_lookup followed by iteration with cds_lfht_next_duplicate
125 * (and/or cds_lfht_next, although less common).
126 * - cds_lfht_add_unique (failure) followed by iteration with
127 * cds_lfht_next_duplicate (and/or cds_lfht_next, although less
128 * common).
129 * - cds_lfht_first followed by iteration with cds_lfht_next (and/or
130 * cds_lfht_next_duplicate, although less common).
131 *
132 * We define "write" operations as any of cds_lfht_add, cds_lfht_replace,
133 * cds_lfht_add_unique (success), cds_lfht_add_replace, cds_lfht_del.
134 *
135 * When cds_lfht_add_unique succeeds (returns the node passed as
136 * parameter), it acts as a "write" operation. When cds_lfht_add_unique
137 * fails (returns a node different from the one passed as parameter), it
138 * acts as a "read" operation. A cds_lfht_add_unique failure is
139 * equivalent to a cds_lfht_lookup "read" operation; therefore, any
140 * ordering guarantee referring to "lookup" applies to both "lookup"
141 * and cds_lfht_add_unique (failure).
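 *
 * For instance, a minimal usage sketch of this convention (assuming a
 * user structure "my_obj" embedding a struct cds_lfht_node named
 * "lfht_node", and a caller-provided match function, key and hash):
 *
 *	rcu_read_lock();
 *	ret_node = cds_lfht_add_unique(ht, hash, match, key,
 *			&my_obj->lfht_node);
 *	if (ret_node != &my_obj->lfht_node) {
 *		// Key already present: ret_node is the pre-existing
 *		// node, observed as by a cds_lfht_lookup().
 *	}
 *	rcu_read_unlock();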
142 *
143 * We define "prior" and "later" nodes as nodes observable by reads and
144 * read traversals respectively before and after a write or sequence of
145 * write operations.
146 *
147 * Hash-table operations are often cascaded, for example, the pointer
148 * returned by a cds_lfht_lookup() might be passed to a cds_lfht_next(),
149 * whose return value might in turn be passed to another hash-table
150 * operation. This entire cascaded series of operations must be enclosed
151 * by a pair of matching rcu_read_lock() and rcu_read_unlock()
152 * operations.
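 *
 * A minimal sketch of such a cascaded sequence (assuming a
 * caller-provided match function, key and hash):
 *
 *	rcu_read_lock();
 *	cds_lfht_lookup(ht, hash, match, key, &iter);
 *	node = cds_lfht_iter_get_node(&iter);
 *	while (node) {
 *		// ... use node ...
 *		cds_lfht_next_duplicate(ht, match, key, &iter);
 *		node = cds_lfht_iter_get_node(&iter);
 *	}
 *	rcu_read_unlock();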
153 *
154 * The following ordering guarantees are offered by this hash table:
155 *
156 * A.1) "read" after "write": if there is ordering between a write and a
157 * later read, then the read is guaranteed to see the write or some
158 * later write.
159 * A.2) "read traversal" after "write": given that there is dependency
160 * ordering between reads in a "read traversal", if there is
161 * ordering between a write and the first read of the traversal,
162 * then the "read traversal" is guaranteed to see the write or
163 * some later write.
164 * B.1) "write" after "read": if there is ordering between a read and a
165 * later write, then the read will never see the write.
166 * B.2) "write" after "read traversal": given that there is dependency
167 * ordering between reads in a "read traversal", if there is
168 * ordering between the last read of the traversal and a later
169 * write, then the "read traversal" will never see the write.
170 * C) "write" while "read traversal": if a write occurs during a "read
171 * traversal", the traversal may, or may not, see the write.
172 * D.1) "write" after "write": if there is ordering between a write and
173 * a later write, then the later write is guaranteed to see the
174 * effects of the first write.
175 * D.2) Concurrent "write" pairs: The system will assign an arbitrary
176 * order to any pair of concurrent conflicting writes.
177 * Non-conflicting writes (for example, to different keys) are
178 * unordered.
179 * E) If a grace period separates a "del" or "replace" operation
180 * and a subsequent operation, then that subsequent operation is
181 * guaranteed not to see the removed item.
182 * F) Uniqueness guarantee: given a hash table that does not contain
183 * duplicate items for a given key, there will only be one item in
184 * the hash table after an arbitrary sequence of add_unique and/or
185 * add_replace operations. Note, however, that a pair of
186 * concurrent read operations might well access two different items
187 * with that key.
188 * G.1) If a pair of lookups for a given key are ordered (e.g. by a
189 * memory barrier), then the second lookup will return the same
190 * node as the previous lookup, or some later node.
191 * G.2) A "read traversal" that starts after the end of a prior "read
192 * traversal" (ordered by memory barriers) is guaranteed to see the
193 * same nodes as the previous traversal, or some later nodes.
194 * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
195 * example, if a pair of reads to the same key run concurrently
196 * with an insertion of that same key, the reads remain unordered
197 * regardless of their return values. In other words, you cannot
198 * rely on the values returned by the reads to deduce ordering.
199 *
200 * Progress guarantees:
201 *
202 * * Reads are wait-free. These operations always move forward in the
203 * hash table linked list, and this list has no loop.
204 * * Writes are lock-free. Any retry loop performed by a write operation
205 * is triggered by progress made within another update operation.
206 *
207 * Bucket node tables:
208 *
209 * hash table hash table the last all bucket node tables
210 * order size bucket node 0 1 2 3 4 5 6(index)
211 * table size
212 * 0 1 1 1
213 * 1 2 1 1 1
214 * 2 4 2 1 1 2
215 * 3 8 4 1 1 2 4
216 * 4 16 8 1 1 2 4 8
217 * 5 32 16 1 1 2 4 8 16
218 * 6 64 32 1 1 2 4 8 16 32
219 *
220 * When growing/shrinking, we only focus on the last bucket node table,
221 * whose size is (!order ? 1 : (1 << (order - 1))).
222 *
223 * Example for growing/shrinking:
224 * grow hash table from order 5 to 6: init the index=6 bucket node table
225 * shrink hash table from order 6 to 5: fini the index=6 bucket node table
226 *
227 * A bit of ascii art explanation:
228 *
229 * The order index is off by one compared to the actual power of 2,
230 * because we use index 0 to deal with the 0 special-case.
231 *
232 * This shows the nodes for a small table ordered by reversed bits:
233 *
234 * bits reverse
235 * 0 000 000
236 * 4 100 001
237 * 2 010 010
238 * 6 110 011
239 * 1 001 100
240 * 5 101 101
241 * 3 011 110
242 * 7 111 111
243 *
244 * This shows the nodes in order of non-reversed bits, linked by
245 * reversed-bit order.
246 *
247 * order bits reverse
248 * 0 0 000 000
249 * 1 | 1 001 100 <-
250 * 2 | | 2 010 010 <- |
251 * | | | 3 011 110 | <- |
252 * 3 -> | | | 4 100 001 | |
253 * -> | | 5 101 101 |
254 * -> | 6 110 011
255 * -> 7 111 111
256 */
257
258 #define _LGPL_SOURCE
259 #include <stdlib.h>
260 #include <errno.h>
261 #include <stdio.h>
262 #include <stdint.h>
263 #include <string.h>
264 #include <sched.h>
265 #include <unistd.h>
266
267 #include "compat-getcpu.h"
268 #include <urcu/assert.h>
269 #include <urcu/pointer.h>
270 #include <urcu/call-rcu.h>
271 #include <urcu/flavor.h>
272 #include <urcu/arch.h>
273 #include <urcu/uatomic.h>
274 #include <urcu/compiler.h>
275 #include <urcu/rculfhash.h>
276 #include <urcu/static/urcu-signal-nr.h>
278 #include <pthread.h>
279 #include <signal.h>
280 #include "rculfhash-internal.h"
281 #include "workqueue.h"
282 #include "urcu-die.h"
283 #include "urcu-utils.h"
284 #include "compat-smp.h"
285
286 /*
287 * Split-counters lazily update the global counter every 1024
288 * additions/removals. They automatically keep track of whether a resize
289 * is required. We use the bucket length as an indicator of the need to
290 * expand for small tables and on machines lacking per-cpu data support.
291 */
292 #define COUNT_COMMIT_ORDER 10
293 #define DEFAULT_SPLIT_COUNT_MASK 0xFUL
294 #define CHAIN_LEN_TARGET 1
295 #define CHAIN_LEN_RESIZE_THRESHOLD 3
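
/*
 * For example, with COUNT_COMMIT_ORDER = 10, each split-counter commits
 * its contribution to the global ht->count only once every
 * 1UL << 10 = 1024 local additions (or removals). See ht_count_add()
 * and ht_count_del() below.
 */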
296
297 /*
298 * Define the minimum table size.
299 */
300 #define MIN_TABLE_ORDER 0
301 #define MIN_TABLE_SIZE (1UL << MIN_TABLE_ORDER)
302
303 /*
304 * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
305 */
306 #define MIN_PARTITION_PER_THREAD_ORDER 12
307 #define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
308
309 /*
310 * The removed flag needs to be updated atomically with the pointer.
311 * It indicates that no node must attach to the node scheduled for
312 * removal, and that node garbage collection must be performed.
313 * The bucket flag does not require to be updated atomically with the
314 * pointer, but it is added as a pointer low bit flag to save space.
315 * The "removal owner" flag is used to detect which of the "del"
316 * operations that set the "removed" flag gets to return the removed
317 * node to its caller. Note that the replace operation does not need to
318 * interact with the "removal owner" flag, because it validates that
319 * the "removed" flag is not set before performing its cmpxchg.
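 * Because the three lowest pointer bits are used as flags (FLAGS_MASK),
 * hash table nodes must be aligned on at least 8 bytes so that these
 * bits are always zero in a valid node pointer.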
320 */
321 #define REMOVED_FLAG (1UL << 0)
322 #define BUCKET_FLAG (1UL << 1)
323 #define REMOVAL_OWNER_FLAG (1UL << 2)
324 #define FLAGS_MASK ((1UL << 3) - 1)
325
326 /* Value of the end pointer. Should not interact with flags. */
327 #define END_VALUE NULL
328
329 /*
330 * ht_items_count: Split-counters counting the number of node additions
331 * and removals in the table. Only used if the CDS_LFHT_ACCOUNTING flag
332 * is set at hash table creation.
333 *
334 * These are free-running counters, never reset to zero. They count the
335 * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
336 * operations to update the global counter. We choose a power-of-2 value
337 * for the trigger to deal with 32 or 64-bit overflow of the counter.
338 */
339 struct ht_items_count {
340 unsigned long add, del;
341 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
342
343 /*
344 * resize_work: Contains arguments passed to worker thread
345 * responsible for performing lazy resize.
346 */
347 struct resize_work {
348 struct urcu_work work;
349 struct cds_lfht *ht;
350 };
351
352 /*
353 * partition_resize_work: Contains arguments passed to worker threads
354 * executing the hash table resize on partitions of the hash table
355 * assigned to each processor's worker thread.
356 */
357 struct partition_resize_work {
358 pthread_t thread_id;
359 struct cds_lfht *ht;
360 unsigned long i, start, len;
361 void (*fct)(struct cds_lfht *ht, unsigned long i,
362 unsigned long start, unsigned long len);
363 };
364
365 static struct urcu_workqueue *cds_lfht_workqueue;
366
367 /*
368 * Mutex ensuring mutual exclusion between workqueue initialization and
369 * fork handlers. cds_lfht_fork_mutex nests inside call_rcu_mutex.
370 */
371 static pthread_mutex_t cds_lfht_fork_mutex = PTHREAD_MUTEX_INITIALIZER;
372
373 static struct urcu_atfork cds_lfht_atfork;
374
375 /*
376 * atfork handler nesting counter. Handles being registered with multiple
377 * urcu flavors, and thus possibly being invoked more than once in the
378 * pthread_atfork list of callbacks.
379 */
380 static int cds_lfht_workqueue_atfork_nesting;
381
382 static void __attribute__((destructor)) cds_lfht_exit(void);
383 static void cds_lfht_init_worker(const struct rcu_flavor_struct *flavor);
384
385 #ifdef CONFIG_CDS_LFHT_ITER_DEBUG
386
387 static
388 void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht, struct cds_lfht_iter *iter)
389 {
390 iter->lfht = ht;
391 }
392
393 #define cds_lfht_iter_debug_assert(...) urcu_posix_assert(__VA_ARGS__)
394
395 #else
396
397 static
398 void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht __attribute__((unused)),
399 struct cds_lfht_iter *iter __attribute__((unused)))
400 {
401 }
402
403 #define cds_lfht_iter_debug_assert(...)
404
405 #endif
406
407 /*
408 * Algorithm to reverse bits in a word by lookup table, extended to
409 * 64-bit words.
410 * Source:
411 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
412 * Originally in the public domain.
413 */
414
415 static const uint8_t BitReverseTable256[256] =
416 {
417 #define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
418 #define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
419 #define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
420 R6(0), R6(2), R6(1), R6(3)
421 };
422 #undef R2
423 #undef R4
424 #undef R6
425
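/* Example: bit_reverse_u8(0x01) == 0x80, bit_reverse_u8(0xF0) == 0x0F. */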
426 static
427 uint8_t bit_reverse_u8(uint8_t v)
428 {
429 return BitReverseTable256[v];
430 }
431
432 #if (CAA_BITS_PER_LONG == 32)
433 static
434 uint32_t bit_reverse_u32(uint32_t v)
435 {
436 return ((uint32_t) bit_reverse_u8(v) << 24) |
437 ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
438 ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
439 ((uint32_t) bit_reverse_u8(v >> 24));
440 }
441 #else
442 static
443 uint64_t bit_reverse_u64(uint64_t v)
444 {
445 return ((uint64_t) bit_reverse_u8(v) << 56) |
446 ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
447 ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
448 ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
449 ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
450 ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
451 ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
452 ((uint64_t) bit_reverse_u8(v >> 56));
453 }
454 #endif
455
456 static
457 unsigned long bit_reverse_ulong(unsigned long v)
458 {
459 #if (CAA_BITS_PER_LONG == 32)
460 return bit_reverse_u32(v);
461 #else
462 return bit_reverse_u64(v);
463 #endif
464 }
465
466 /*
467 * fls: returns the position of the most significant bit.
468 * Returns 0 if no bit is set, else returns the position of the most
469 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
470 */
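/* For example: fls_u32(0) == 0, fls_u32(1) == 1, fls_u32(0x80000000) == 32. */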
471 #if defined(URCU_ARCH_X86)
472 static inline
473 unsigned int fls_u32(uint32_t x)
474 {
475 int r;
476
477 __asm__ ("bsrl %1,%0\n\t"
478 "jnz 1f\n\t"
479 "movl $-1,%0\n\t"
480 "1:\n\t"
481 : "=r" (r) : "rm" (x));
482 return r + 1;
483 }
484 #define HAS_FLS_U32
485 #endif
486
487 #if defined(URCU_ARCH_AMD64)
488 static inline
489 unsigned int fls_u64(uint64_t x)
490 {
491 long r;
492
493 __asm__ ("bsrq %1,%0\n\t"
494 "jnz 1f\n\t"
495 "movq $-1,%0\n\t"
496 "1:\n\t"
497 : "=r" (r) : "rm" (x));
498 return r + 1;
499 }
500 #define HAS_FLS_U64
501 #endif
502
503 #ifndef HAS_FLS_U64
504 static __attribute__((unused))
505 unsigned int fls_u64(uint64_t x)
506 {
507 unsigned int r = 64;
508
509 if (!x)
510 return 0;
511
512 if (!(x & 0xFFFFFFFF00000000ULL)) {
513 x <<= 32;
514 r -= 32;
515 }
516 if (!(x & 0xFFFF000000000000ULL)) {
517 x <<= 16;
518 r -= 16;
519 }
520 if (!(x & 0xFF00000000000000ULL)) {
521 x <<= 8;
522 r -= 8;
523 }
524 if (!(x & 0xF000000000000000ULL)) {
525 x <<= 4;
526 r -= 4;
527 }
528 if (!(x & 0xC000000000000000ULL)) {
529 x <<= 2;
530 r -= 2;
531 }
532 if (!(x & 0x8000000000000000ULL)) {
533 x <<= 1;
534 r -= 1;
535 }
536 return r;
537 }
538 #endif
539
540 #ifndef HAS_FLS_U32
541 static __attribute__((unused))
542 unsigned int fls_u32(uint32_t x)
543 {
544 unsigned int r = 32;
545
546 if (!x)
547 return 0;
548 if (!(x & 0xFFFF0000U)) {
549 x <<= 16;
550 r -= 16;
551 }
552 if (!(x & 0xFF000000U)) {
553 x <<= 8;
554 r -= 8;
555 }
556 if (!(x & 0xF0000000U)) {
557 x <<= 4;
558 r -= 4;
559 }
560 if (!(x & 0xC0000000U)) {
561 x <<= 2;
562 r -= 2;
563 }
564 if (!(x & 0x80000000U)) {
565 x <<= 1;
566 r -= 1;
567 }
568 return r;
569 }
570 #endif
571
572 unsigned int cds_lfht_fls_ulong(unsigned long x)
573 {
574 #if (CAA_BITS_PER_LONG == 32)
575 return fls_u32(x);
576 #else
577 return fls_u64(x);
578 #endif
579 }
580
581 /*
582 * Return the minimum order for which x <= (1UL << order).
583 * Return -1 if x is 0.
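 * For example: order(1) == 0, order(5) == 3 (since 5 <= 8), order(8) == 3.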
584 */
585 static
586 int cds_lfht_get_count_order_u32(uint32_t x)
587 {
588 if (!x)
589 return -1;
590
591 return fls_u32(x - 1);
592 }
593
594 /*
595 * Return the minimum order for which x <= (1UL << order).
596 * Return -1 if x is 0.
597 */
598 int cds_lfht_get_count_order_ulong(unsigned long x)
599 {
600 if (!x)
601 return -1;
602
603 return cds_lfht_fls_ulong(x - 1);
604 }
605
606 static
607 void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);
608
609 static
610 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
611 unsigned long count);
612
613 static void mutex_lock(pthread_mutex_t *mutex)
614 {
615 int ret;
616
617 #ifndef DISTRUST_SIGNALS_EXTREME
618 ret = pthread_mutex_lock(mutex);
619 if (ret)
620 urcu_die(ret);
621 #else /* #ifndef DISTRUST_SIGNALS_EXTREME */
622 while ((ret = pthread_mutex_trylock(mutex)) != 0) {
623 if (ret != EBUSY && ret != EINTR)
624 urcu_die(ret);
625 if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
626 cmm_smp_mb();
627 _CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
628 cmm_smp_mb();
629 }
630 (void) poll(NULL, 0, 10);
631 }
632 #endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
633 }
634
635 static void mutex_unlock(pthread_mutex_t *mutex)
636 {
637 int ret;
638
639 ret = pthread_mutex_unlock(mutex);
640 if (ret)
641 urcu_die(ret);
642 }
643
644 static long nr_cpus_mask = -1;
645 static long split_count_mask = -1;
646 static int split_count_order = -1;
647
648 static void ht_init_nr_cpus_mask(void)
649 {
650 long maxcpus;
651
652 maxcpus = get_possible_cpus_array_len();
653 if (maxcpus <= 0) {
654 nr_cpus_mask = -2;
655 return;
656 }
657 /*
658 * Round up the number of CPUs to the next power of two, so we
659 * can use & for modulo.
660 */
661 maxcpus = 1UL << cds_lfht_get_count_order_ulong(maxcpus);
662 nr_cpus_mask = maxcpus - 1;
663 }
664
665 static
666 void alloc_split_items_count(struct cds_lfht *ht)
667 {
668 if (nr_cpus_mask == -1) {
669 ht_init_nr_cpus_mask();
670 if (nr_cpus_mask < 0)
671 split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
672 else
673 split_count_mask = nr_cpus_mask;
674 split_count_order =
675 cds_lfht_get_count_order_ulong(split_count_mask + 1);
676 }
677
678 urcu_posix_assert(split_count_mask >= 0);
679
680 if (ht->flags & CDS_LFHT_ACCOUNTING) {
681 ht->split_count = calloc(split_count_mask + 1,
682 sizeof(struct ht_items_count));
683 urcu_posix_assert(ht->split_count);
684 } else {
685 ht->split_count = NULL;
686 }
687 }
688
689 static
690 void free_split_items_count(struct cds_lfht *ht)
691 {
692 poison_free(ht->split_count);
693 }
694
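/*
 * Pick the split-counter index for an operation: use the current CPU
 * number when it can be determined, else derive an index from the hash.
 */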
695 static
696 int ht_get_split_count_index(unsigned long hash)
697 {
698 int cpu;
699
700 urcu_posix_assert(split_count_mask >= 0);
701 cpu = urcu_sched_getcpu();
702 if (caa_unlikely(cpu < 0))
703 return hash & split_count_mask;
704 else
705 return cpu & split_count_mask;
706 }
707
708 static
709 void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
710 {
711 unsigned long split_count, count;
712 int index;
713
714 if (caa_unlikely(!ht->split_count))
715 return;
716 index = ht_get_split_count_index(hash);
717 split_count = uatomic_add_return(&ht->split_count[index].add, 1);
718 if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
719 return;
720 /* Only if the number of additions is a multiple of 1UL << COUNT_COMMIT_ORDER */
721
722 dbg_printf("add split count %lu\n", split_count);
723 count = uatomic_add_return(&ht->count,
724 1UL << COUNT_COMMIT_ORDER);
725 if (caa_likely(count & (count - 1)))
726 return;
727 /* Only if global count is power of 2 */
728
729 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
730 return;
731 dbg_printf("add set global %lu\n", count);
732 cds_lfht_resize_lazy_count(ht, size,
733 count >> (CHAIN_LEN_TARGET - 1));
734 }
735
736 static
737 void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
738 {
739 unsigned long split_count, count;
740 int index;
741
742 if (caa_unlikely(!ht->split_count))
743 return;
744 index = ht_get_split_count_index(hash);
745 split_count = uatomic_add_return(&ht->split_count[index].del, 1);
746 if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
747 return;
748 /* Only if the number of deletions is a multiple of 1UL << COUNT_COMMIT_ORDER */
749
750 dbg_printf("del split count %lu\n", split_count);
751 count = uatomic_add_return(&ht->count,
752 -(1UL << COUNT_COMMIT_ORDER));
753 if (caa_likely(count & (count - 1)))
754 return;
755 /* Only if global count is power of 2 */
756
757 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
758 return;
759 dbg_printf("del set global %lu\n", count);
760 /*
761 * Don't shrink table if the number of nodes is below a
762 * certain threshold.
763 */
764 if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
765 return;
766 cds_lfht_resize_lazy_count(ht, size,
767 count >> (CHAIN_LEN_TARGET - 1));
768 }
769
770 static
771 void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
772 {
773 unsigned long count;
774
775 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
776 return;
777 count = uatomic_read(&ht->count);
778 /*
779 * Use bucket-local length for small table expand and for
780 * environments lacking per-cpu data support.
781 */
782 if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
783 return;
784 if (chain_len > 100)
785 dbg_printf("WARNING: large chain length: %u.\n",
786 chain_len);
787 if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) {
788 int growth;
789
790 /*
791 * Ideal growth calculated based on chain length.
792 */
793 growth = cds_lfht_get_count_order_u32(chain_len
794 - (CHAIN_LEN_TARGET - 1));
795 if ((ht->flags & CDS_LFHT_ACCOUNTING)
796 && (size << growth)
797 >= (1UL << (COUNT_COMMIT_ORDER
798 + split_count_order))) {
799 /*
800 * If ideal growth expands the hash table size
801 * beyond the "small hash table" sizes, use the
802 * maximum small hash table size to attempt
803 * expanding the hash table. This only applies
804 * when node accounting is available, otherwise
805 * the chain length is used to expand the hash
806 * table in every case.
807 */
808 growth = COUNT_COMMIT_ORDER + split_count_order
809 - cds_lfht_get_count_order_ulong(size);
810 if (growth <= 0)
811 return;
812 }
813 cds_lfht_resize_lazy_grow(ht, size, growth);
814 }
815 }
816
817 static
818 struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
819 {
820 return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
821 }
822
823 static
824 int is_removed(const struct cds_lfht_node *node)
825 {
826 return ((unsigned long) node) & REMOVED_FLAG;
827 }
828
829 static
830 int is_bucket(struct cds_lfht_node *node)
831 {
832 return ((unsigned long) node) & BUCKET_FLAG;
833 }
834
835 static
836 struct cds_lfht_node *flag_bucket(struct cds_lfht_node *node)
837 {
838 return (struct cds_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
839 }
840
841 static
842 int is_removal_owner(struct cds_lfht_node *node)
843 {
844 return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
845 }
846
847 static
848 struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
849 {
850 return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
851 }
852
853 static
854 struct cds_lfht_node *flag_removal_owner(struct cds_lfht_node *node)
855 {
856 return (struct cds_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
857 }
858
859 static
860 struct cds_lfht_node *flag_removed_or_removal_owner(struct cds_lfht_node *node)
861 {
862 return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG | REMOVAL_OWNER_FLAG);
863 }
864
865 static
866 struct cds_lfht_node *get_end(void)
867 {
868 return (struct cds_lfht_node *) END_VALUE;
869 }
870
871 static
872 int is_end(struct cds_lfht_node *node)
873 {
874 return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
875 }
876
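/*
 * Set *ptr to v only if v is greater than the current value (monotonic
 * increase). Returns the previous value when the update is performed,
 * or the current value (already >= v) when it is not.
 */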
877 static
878 unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
879 unsigned long v)
880 {
881 unsigned long old1, old2;
882
883 old1 = uatomic_read(ptr);
884 do {
885 old2 = old1;
886 if (old2 >= v)
887 return old2;
888 } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
889 return old2;
890 }
891
892 static
893 void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
894 {
895 return ht->mm->alloc_bucket_table(ht, order);
896 }
897
898 /*
899 * cds_lfht_free_bucket_table() should be called with decreasing order.
900 * When cds_lfht_free_bucket_table(0) is called, it means the whole
901 * lfht is destroyed.
902 */
903 static
904 void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
905 {
906 return ht->mm->free_bucket_table(ht, order);
907 }
908
909 static inline
910 struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
911 {
912 return ht->bucket_at(ht, index);
913 }
914
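/*
 * The table size is always a power of two, so "hash & (size - 1)" is
 * the hash modulo the table size.
 */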
915 static inline
916 struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
917 unsigned long hash)
918 {
919 urcu_posix_assert(size > 0);
920 return bucket_at(ht, hash & (size - 1));
921 }
922
923 /*
924 * Remove all logically deleted nodes from a bucket up to a certain node key.
925 */
926 static
927 void _cds_lfht_gc_bucket(struct cds_lfht_node *bucket, struct cds_lfht_node *node)
928 {
929 struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
930
931 urcu_posix_assert(!is_bucket(bucket));
932 urcu_posix_assert(!is_removed(bucket));
933 urcu_posix_assert(!is_removal_owner(bucket));
934 urcu_posix_assert(!is_bucket(node));
935 urcu_posix_assert(!is_removed(node));
936 urcu_posix_assert(!is_removal_owner(node));
937 for (;;) {
938 iter_prev = bucket;
939 /* We can always skip the bucket node initially */
940 iter = rcu_dereference(iter_prev->next);
941 urcu_posix_assert(!is_removed(iter));
942 urcu_posix_assert(!is_removal_owner(iter));
943 urcu_posix_assert(iter_prev->reverse_hash <= node->reverse_hash);
944 /*
945 * We should never be called with bucket (start of chain)
946 * and logically removed node (end of path compression
947 * marker) being the actual same node. This would be a
948 * bug in the algorithm implementation.
949 */
950 urcu_posix_assert(bucket != node);
951 for (;;) {
952 if (caa_unlikely(is_end(iter)))
953 return;
954 if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
955 return;
956 next = rcu_dereference(clear_flag(iter)->next);
957 if (caa_likely(is_removed(next)))
958 break;
959 iter_prev = clear_flag(iter);
960 iter = next;
961 }
962 urcu_posix_assert(!is_removed(iter));
963 urcu_posix_assert(!is_removal_owner(iter));
964 if (is_bucket(iter))
965 new_next = flag_bucket(clear_flag(next));
966 else
967 new_next = clear_flag(next);
968 (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
969 }
970 }
971
972 static
973 int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
974 struct cds_lfht_node *old_node,
975 struct cds_lfht_node *old_next,
976 struct cds_lfht_node *new_node)
977 {
978 struct cds_lfht_node *bucket, *ret_next;
979
980 if (!old_node) /* Return -ENOENT if asked to replace NULL node */
981 return -ENOENT;
982
983 urcu_posix_assert(!is_removed(old_node));
984 urcu_posix_assert(!is_removal_owner(old_node));
985 urcu_posix_assert(!is_bucket(old_node));
986 urcu_posix_assert(!is_removed(new_node));
987 urcu_posix_assert(!is_removal_owner(new_node));
988 urcu_posix_assert(!is_bucket(new_node));
989 urcu_posix_assert(new_node != old_node);
990 for (;;) {
991 /* Insert after node to be replaced */
992 if (is_removed(old_next)) {
993 /*
994 * Too late, the old node has been removed under us
995 * between lookup and replace. Fail.
996 */
997 return -ENOENT;
998 }
999 urcu_posix_assert(old_next == clear_flag(old_next));
1000 urcu_posix_assert(new_node != old_next);
1001 /*
1002 * REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
1003 * flag. It is either set atomically at the same time
1004 * (replace) or after (del).
1005 */
1006 urcu_posix_assert(!is_removal_owner(old_next));
1007 new_node->next = old_next;
1008 /*
1009 * Here is the whole trick for lock-free replace: we add
1010 * the replacement node _after_ the node we want to
1011 * replace by atomically setting its next pointer at the
1012 * same time we set its removal flag. Given that
1013 * the lookups/get next use an iterator aware of the
1014 * next pointer, they will either skip the old node due
1015 * to the removal flag and see the new node, or use
1016 * the old node, but will not see the new one.
1017 * This is a replacement of a node with another node
1018 * that has the same value: we are therefore not
1019 * removing a value from the hash table. We set both the
1020 * REMOVED and REMOVAL_OWNER flags atomically so we own
1021 * the node after successful cmpxchg.
1022 */
1023 ret_next = uatomic_cmpxchg(&old_node->next,
1024 old_next, flag_removed_or_removal_owner(new_node));
1025 if (ret_next == old_next)
1026 break; /* We performed the replacement. */
1027 old_next = ret_next;
1028 }
1029
1030 /*
1031 * Ensure that the old node is not visible to readers anymore:
1032 * lookup for the node, and remove it (along with any other
1033 * logically removed node) if found.
1034 */
1035 bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
1036 _cds_lfht_gc_bucket(bucket, new_node);
1037
1038 urcu_posix_assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
1039 return 0;
1040 }
1041
1042 /*
1043 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
1044 * mode. A NULL unique_ret allows creation of duplicate keys.
1045 */
1046 static
1047 void _cds_lfht_add(struct cds_lfht *ht,
1048 unsigned long hash,
1049 cds_lfht_match_fct match,
1050 const void *key,
1051 unsigned long size,
1052 struct cds_lfht_node *node,
1053 struct cds_lfht_iter *unique_ret,
1054 int bucket_flag)
1055 {
1056 struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
1057 *return_node;
1058 struct cds_lfht_node *bucket;
1059
1060 urcu_posix_assert(!is_bucket(node));
1061 urcu_posix_assert(!is_removed(node));
1062 urcu_posix_assert(!is_removal_owner(node));
1063 bucket = lookup_bucket(ht, size, hash);
1064 for (;;) {
1065 uint32_t chain_len = 0;
1066
1067 /*
1068 * iter_prev points to the non-removed node prior to the
1069 * insert location.
1070 */
1071 iter_prev = bucket;
1072 /* We can always skip the bucket node initially */
1073 iter = rcu_dereference(iter_prev->next);
1074 urcu_posix_assert(iter_prev->reverse_hash <= node->reverse_hash);
1075 for (;;) {
1076 if (caa_unlikely(is_end(iter)))
1077 goto insert;
1078 if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
1079 goto insert;
1080
1081 /* bucket node is the first node of the identical-hash-value chain */
1082 if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
1083 goto insert;
1084
1085 next = rcu_dereference(clear_flag(iter)->next);
1086 if (caa_unlikely(is_removed(next)))
1087 goto gc_node;
1088
1089 /* uniquely add */
1090 if (unique_ret
1091 && !is_bucket(next)
1092 && clear_flag(iter)->reverse_hash == node->reverse_hash) {
1093 struct cds_lfht_iter d_iter = {
1094 .node = node,
1095 .next = iter,
1096 #ifdef CONFIG_CDS_LFHT_ITER_DEBUG
1097 .lfht = ht,
1098 #endif
1099 };
1100
1101 /*
1102 * uniquely adding inserts the node as the first
1103 * node of the identical-hash-value node chain.
1104 *
1105 * This semantic ensures no duplicated keys
1106 * should ever be observable in the table
1107 * (including when traversing the table node
1108 * by node with forward iterations).
1109 */
1110 cds_lfht_next_duplicate(ht, match, key, &d_iter);
1111 if (!d_iter.node)
1112 goto insert;
1113
1114 *unique_ret = d_iter;
1115 return;
1116 }
1117
1118 /* Only account for identical reverse hash once */
1119 if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash
1120 && !is_bucket(next))
1121 check_resize(ht, size, ++chain_len);
1122 iter_prev = clear_flag(iter);
1123 iter = next;
1124 }
1125
1126 insert:
1127 urcu_posix_assert(node != clear_flag(iter));
1128 urcu_posix_assert(!is_removed(iter_prev));
1129 urcu_posix_assert(!is_removal_owner(iter_prev));
1130 urcu_posix_assert(!is_removed(iter));
1131 urcu_posix_assert(!is_removal_owner(iter));
1132 urcu_posix_assert(iter_prev != node);
1133 if (!bucket_flag)
1134 node->next = clear_flag(iter);
1135 else
1136 node->next = flag_bucket(clear_flag(iter));
1137 if (is_bucket(iter))
1138 new_node = flag_bucket(node);
1139 else
1140 new_node = node;
1141 if (uatomic_cmpxchg(&iter_prev->next, iter,
1142 new_node) != iter) {
1143 continue; /* retry */
1144 } else {
1145 return_node = node;
1146 goto end;
1147 }
1148
1149 gc_node:
1150 urcu_posix_assert(!is_removed(iter));
1151 urcu_posix_assert(!is_removal_owner(iter));
1152 if (is_bucket(iter))
1153 new_next = flag_bucket(clear_flag(next));
1154 else
1155 new_next = clear_flag(next);
1156 (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
1157 /* retry */
1158 }
1159 end:
1160 if (unique_ret) {
1161 unique_ret->node = return_node;
1162 /* unique_ret->next left unset, never used. */
1163 }
1164 }
1165
1166 static
1167 int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
1168 struct cds_lfht_node *node)
1169 {
1170 struct cds_lfht_node *bucket, *next;
1171
1172 if (!node) /* Return -ENOENT if asked to delete NULL node */
1173 return -ENOENT;
1174
1175 /* logically delete the node */
1176 urcu_posix_assert(!is_bucket(node));
1177 urcu_posix_assert(!is_removed(node));
1178 urcu_posix_assert(!is_removal_owner(node));
1179
1180 /*
1181 * We are first checking if the node had previously been
1182 * logically removed (this check is not atomic with setting the
1183 * logical removal flag). Return -ENOENT if the node had
1184 * previously been removed.
1185 */
1186 next = CMM_LOAD_SHARED(node->next); /* next is not dereferenced */
1187 if (caa_unlikely(is_removed(next)))
1188 return -ENOENT;
1189 urcu_posix_assert(!is_bucket(next));
1190 /*
1191 * The del operation semantic guarantees a full memory barrier
1192 * before the uatomic_or atomic commit of the deletion flag.
1193 */
1194 cmm_smp_mb__before_uatomic_or();
1195 /*
1196 * We set the REMOVED_FLAG unconditionally. Note that there may
1197 * be more than one concurrent thread setting this flag.
1198 * Knowing which wins the race will be known after the garbage
1199 * collection phase, stay tuned!
1200 */
1201 uatomic_or(&node->next, REMOVED_FLAG);
1202 /* We performed the (logical) deletion. */
1203
1204 /*
1205 * Ensure that the node is not visible to readers anymore: lookup for
1206 * the node, and remove it (along with any other logically removed node)
1207 * if found.
1208 */
1209 bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
1210 _cds_lfht_gc_bucket(bucket, node);
1211
1212 urcu_posix_assert(is_removed(CMM_LOAD_SHARED(node->next)));
1213 /*
1214 * Last phase: atomically exchange node->next with a version
1215 * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
1216 * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
1217 * the node and win the removal race.
1218 * It is interesting to note that all "add" paths are forbidden
1219 * to change the next pointer starting from the point where the
1220 * REMOVED_FLAG is set, so here using a read followed by an
1221 * xchg() suffices to guarantee that the xchg() will only ever
1222 * set the "REMOVAL_OWNER_FLAG" (or change nothing if the flag
1223 * was already set).
1224 */
1225 if (!is_removal_owner(uatomic_xchg(&node->next,
1226 flag_removal_owner(node->next))))
1227 return 0;
1228 else
1229 return -ENOENT;
1230 }
1231
1232 static
1233 void *partition_resize_thread(void *arg)
1234 {
1235 struct partition_resize_work *work = arg;
1236
1237 work->ht->flavor->register_thread();
1238 work->fct(work->ht, work->i, work->start, work->len);
1239 work->ht->flavor->unregister_thread();
1240 return NULL;
1241 }
1242
1243 static
1244 void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
1245 unsigned long len,
1246 void (*fct)(struct cds_lfht *ht, unsigned long i,
1247 unsigned long start, unsigned long len))
1248 {
1249 unsigned long partition_len, start = 0;
1250 struct partition_resize_work *work;
1251 int ret;
1252 unsigned long thread, nr_threads;
1253 sigset_t newmask, oldmask;
1254
1255 urcu_posix_assert(nr_cpus_mask != -1);
1256 if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
1257 goto fallback;
1258
1259 /*
1260 * Note: nr_cpus_mask + 1 is always a power of 2.
1261 * We spawn just the number of threads we need to satisfy the minimum
1262 * partition size, up to the number of CPUs in the system.
1263 */
1264 if (nr_cpus_mask > 0) {
1265 nr_threads = min_t(unsigned long, nr_cpus_mask + 1,
1266 len >> MIN_PARTITION_PER_THREAD_ORDER);
1267 } else {
1268 nr_threads = 1;
1269 }
1270 partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
1271 work = calloc(nr_threads, sizeof(*work));
1272 if (!work) {
1273 dbg_printf("error allocating for resize, single-threading\n");
1274 goto fallback;
1275 }
1276
1277 ret = sigfillset(&newmask);
1278 urcu_posix_assert(!ret);
1279 ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
1280 urcu_posix_assert(!ret);
1281
1282 for (thread = 0; thread < nr_threads; thread++) {
1283 work[thread].ht = ht;
1284 work[thread].i = i;
1285 work[thread].len = partition_len;
1286 work[thread].start = thread * partition_len;
1287 work[thread].fct = fct;
1288 ret = pthread_create(&(work[thread].thread_id),
1289 ht->caller_resize_attr ? &ht->resize_attr : NULL,
1290 partition_resize_thread, &work[thread]);
1291 if (ret == EAGAIN) {
1292 /*
1293 * Out of resources: wait and join the threads
1294 * we've created, then handle leftovers.
1295 */
1296 dbg_printf("error spawning for resize, single-threading\n");
1297 start = work[thread].start;
1298 len -= start;
1299 nr_threads = thread;
1300 break;
1301 }
1302 urcu_posix_assert(!ret);
1303 }
1304
1305 ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
1306 urcu_posix_assert(!ret);
1307
1308 for (thread = 0; thread < nr_threads; thread++) {
1309 ret = pthread_join(work[thread].thread_id, NULL);
1310 urcu_posix_assert(!ret);
1311 }
1312 free(work);
1313
1314 /*
1315 * A pthread_create failure above will either leave us with no
1316 * threads to join or have us start at a non-zero offset; fall
1317 * back to single-threaded processing of the leftovers.
1318 */
1319 if (start == 0 && nr_threads > 0)
1320 return;
1321 fallback:
1322 fct(ht, i, start, len);
1323 }
1324
1325 /*
1326 * Holding RCU read lock to protect _cds_lfht_add against memory
1327 * reclaim that could be performed by other worker threads (ABA
1328 * problem).
1329 *
1330 * When we reach a certain length, we can split this population phase over
1331 * many worker threads, based on the number of CPUs available in the system.
1332 * This should therefore keep the expand from lagging too far behind
1333 * concurrent insertion threads, by using the scheduler's ability to
1334 * schedule bucket node population fairly with insertions.
1335 */
1336 static
1337 void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
1338 unsigned long start, unsigned long len)
1339 {
1340 unsigned long j, size = 1UL << (i - 1);
1341
1342 urcu_posix_assert(i > MIN_TABLE_ORDER);
1343 ht->flavor->read_lock();
1344 for (j = size + start; j < size + start + len; j++) {
1345 struct cds_lfht_node *new_node = bucket_at(ht, j);
1346
1347 urcu_posix_assert(j >= size && j < (size << 1));
1348 dbg_printf("init populate: order %lu index %lu hash %lu\n",
1349 i, j, j);
1350 new_node->reverse_hash = bit_reverse_ulong(j);
1351 _cds_lfht_add(ht, j, NULL, NULL, size, new_node, NULL, 1);
1352 }
1353 ht->flavor->read_unlock();
1354 }
1355
1356 static
1357 void init_table_populate(struct cds_lfht *ht, unsigned long i,
1358 unsigned long len)
1359 {
1360 partition_resize_helper(ht, i, len, init_table_populate_partition);
1361 }
1362
1363 static
1364 void init_table(struct cds_lfht *ht,
1365 unsigned long first_order, unsigned long last_order)
1366 {
1367 unsigned long i;
1368
1369 dbg_printf("init table: first_order %lu last_order %lu\n",
1370 first_order, last_order);
1371 urcu_posix_assert(first_order > MIN_TABLE_ORDER);
1372 for (i = first_order; i <= last_order; i++) {
1373 unsigned long len;
1374
1375 len = 1UL << (i - 1);
1376 dbg_printf("init order %lu len: %lu\n", i, len);
1377
1378 /* Stop expand if the resize target changes under us */
1379 if (CMM_LOAD_SHARED(ht->resize_target) < (1UL << i))
1380 break;
1381
1382 cds_lfht_alloc_bucket_table(ht, i);
1383
1384 /*
1385 * Set all bucket nodes reverse hash values for a level and
1386 * link all bucket nodes into the table.
1387 */
1388 init_table_populate(ht, i, len);
1389
1390 /*
1391 * Update table size.
1392 */
1393 cmm_smp_wmb(); /* populate data before RCU size */
1394 CMM_STORE_SHARED(ht->size, 1UL << i);
1395
1396 dbg_printf("init new size: %lu\n", 1UL << i);
1397 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1398 break;
1399 }
1400 }
1401
1402 /*
1403 * Holding RCU read lock to protect _cds_lfht_remove against memory
1404 * reclaim that could be performed by other worker threads (ABA
1405 * problem).
1406 * For a single level, we logically remove and garbage collect each node.
1407 *
1408 * As a design choice, we perform logical removal and garbage collection on a
1409 * node-per-node basis to simplify this algorithm. We also assume keeping good
1410 * cache locality of the operation would outweigh the possible performance gain
1411 * that could be achieved by batching garbage collection for multiple levels.
1412 * However, this would have to be justified by benchmarks.
1413 *
1414 * Concurrent removal and add operations are helping us perform garbage
1415 * collection of logically removed nodes. We guarantee that all logically
1416 * removed nodes have been garbage-collected (unlinked) before work
1417 * enqueue is invoked to free a whole level of bucket nodes (after a
1418 * grace period).
1419 *
1420 * Logical removal and garbage collection can therefore be done in batch
1421 * or on a node-per-node basis, as long as the guarantee above holds.
1422 *
1423 * When we reach a certain length, we can split this removal over many worker
1424 * threads, based on the number of CPUs available in the system. This should
1425 * keep the resize process from lagging behind too many concurrent
1426 * updater threads actively inserting into the hash table.
1427 */
1428 static
1429 void remove_table_partition(struct cds_lfht *ht, unsigned long i,
1430 unsigned long start, unsigned long len)
1431 {
1432 unsigned long j, size = 1UL << (i - 1);
1433
1434 urcu_posix_assert(i > MIN_TABLE_ORDER);
1435 ht->flavor->read_lock();
1436 for (j = size + start; j < size + start + len; j++) {
1437 struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
1438 struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);
1439
1440 urcu_posix_assert(j >= size && j < (size << 1));
1441 dbg_printf("remove entry: order %lu index %lu hash %lu\n",
1442 i, j, j);
1443 /* Set the REMOVED_FLAG to freeze the ->next for gc */
1444 uatomic_or(&fini_bucket->next, REMOVED_FLAG);
1445 _cds_lfht_gc_bucket(parent_bucket, fini_bucket);
1446 }
1447 ht->flavor->read_unlock();
1448 }
1449
1450 static
1451 void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
1452 {
1453 partition_resize_helper(ht, i, len, remove_table_partition);
1454 }
1455
1456 /*
1457 * fini_table() is never called for first_order == 0, which is why
1458 * free_by_rcu_order == 0 can be used as criterion to know if free must
1459 * be called.
1460 */
1461 static
1462 void fini_table(struct cds_lfht *ht,
1463 unsigned long first_order, unsigned long last_order)
1464 {
1465 unsigned long free_by_rcu_order = 0, i;
1466
1467 dbg_printf("fini table: first_order %lu last_order %lu\n",
1468 first_order, last_order);
1469 urcu_posix_assert(first_order > MIN_TABLE_ORDER);
1470 for (i = last_order; i >= first_order; i--) {
1471 unsigned long len;
1472
1473 len = 1UL << (i - 1);
1474 dbg_printf("fini order %ld len: %lu\n", i, len);
1475
1476 /* Stop shrink if the resize target changes under us */
1477 if (CMM_LOAD_SHARED(ht->resize_target) > (1UL << (i - 1)))
1478 break;
1479
1480 cmm_smp_wmb(); /* populate data before RCU size */
1481 CMM_STORE_SHARED(ht->size, 1UL << (i - 1));
1482
1483 /*
1484 * We need to wait for all add operations to reach Q.S. (and
1485 * thus use the new table for lookups) before we can start
1486 * releasing the old bucket nodes. Otherwise their lookup will
1487 * return a logically removed node as insert position.
1488 */
1489 ht->flavor->update_synchronize_rcu();
1490 if (free_by_rcu_order)
1491 cds_lfht_free_bucket_table(ht, free_by_rcu_order);
1492
1493 /*
1494 * Set "removed" flag in bucket nodes about to be removed.
1495 * Unlink all now-logically-removed bucket node pointers.
1496 * Concurrent add/remove operation are helping us doing
1497 * the gc.
1498 */
1499 remove_table(ht, i, len);
1500
1501 free_by_rcu_order = i;
1502
1503 dbg_printf("fini new size: %lu\n", 1UL << i);
1504 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1505 break;
1506 }
1507
1508 if (free_by_rcu_order) {
1509 ht->flavor->update_synchronize_rcu();
1510 cds_lfht_free_bucket_table(ht, free_by_rcu_order);
1511 }
1512 }
1513
1514 /*
1515 * Never called with size < 1.
1516 */
1517 static
1518 void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
1519 {
1520 struct cds_lfht_node *prev, *node;
1521 unsigned long order, len, i;
1522 int bucket_order;
1523
1524 cds_lfht_alloc_bucket_table(ht, 0);
1525
1526 dbg_printf("create bucket: order 0 index 0 hash 0\n");
1527 node = bucket_at(ht, 0);
1528 node->next = flag_bucket(get_end());
1529 node->reverse_hash = 0;
1530
1531 bucket_order = cds_lfht_get_count_order_ulong(size);
1532 urcu_posix_assert(bucket_order >= 0);
1533
1534 for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
1535 len = 1UL << (order - 1);
1536 cds_lfht_alloc_bucket_table(ht, order);
1537
1538 for (i = 0; i < len; i++) {
1539 /*
1540 * Now, we are trying to init the node with the
1541 * hash=(len+i) (which is also a bucket with the
1542 * index=(len+i)) and insert it into the hash table,
1543 * so this node has to be inserted after the bucket
1544 * with the index=(len+i)&(len-1)=i. And because there
1545 * is no other non-bucket node nor bucket node with
1546 * larger index/hash inserted, so the bucket node
1547 * being inserted should be inserted directly linked
1548 * after the bucket node with index=i.
1549 */
1550 prev = bucket_at(ht, i);
1551 node = bucket_at(ht, len + i);
1552
1553 dbg_printf("create bucket: order %lu index %lu hash %lu\n",
1554 order, len + i, len + i);
1555 node->reverse_hash = bit_reverse_ulong(len + i);
1556
1557 /* insert after prev */
1558 urcu_posix_assert(is_bucket(prev->next));
1559 node->next = prev->next;
1560 prev->next = flag_bucket(node);
1561 }
1562 }
1563 }
1564
1565 #if (CAA_BITS_PER_LONG > 32)
1566 /*
1567 * For 64-bit architectures, with max number of buckets small enough not to
1568 * use the entire 64-bit memory mapping space (and allowing a fair number of
1569 * hash table instances), use the mmap allocator, which is faster. Otherwise,
1570 * fall back to the order allocator.
1571 */
1572 static
1573 const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
1574 {
1575 if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
1576 return &cds_lfht_mm_mmap;
1577 else
1578 return &cds_lfht_mm_order;
1579 }
1580 #else
1581 /*
1582 * For 32-bit architectures, use the order allocator.
1583 */
1584 static
1585 const struct cds_lfht_mm_type *get_mm_type(
1586 unsigned long max_nr_buckets __attribute__((unused)))
1587 {
1588 return &cds_lfht_mm_order;
1589 }
1590 #endif
1591
1592 void cds_lfht_node_init_deleted(struct cds_lfht_node *node)
1593 {
1594 cds_lfht_node_init(node);
1595 node->next = flag_removed(NULL);
1596 }
1597
1598 struct cds_lfht *_cds_lfht_new(unsigned long init_size,
1599 unsigned long min_nr_alloc_buckets,
1600 unsigned long max_nr_buckets,
1601 int flags,
1602 const struct cds_lfht_mm_type *mm,
1603 const struct rcu_flavor_struct *flavor,
1604 pthread_attr_t *attr)
1605 {
1606 struct cds_lfht *ht;
1607 unsigned long order;
1608
1609 /* min_nr_alloc_buckets must be power of two */
1610 if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
1611 return NULL;
1612
1613 /* init_size must be power of two */
1614 if (!init_size || (init_size & (init_size - 1)))
1615 return NULL;
1616
1617 /*
1618 * Memory management plugin default.
1619 */
1620 if (!mm)
1621 mm = get_mm_type(max_nr_buckets);
1622
1623 /* max_nr_buckets == 0 for order based mm means infinite */
1624 if (mm == &cds_lfht_mm_order && !max_nr_buckets)
1625 max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);
1626
1627 /* max_nr_buckets must be power of two */
1628 if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
1629 return NULL;
1630
1631 if (flags & CDS_LFHT_AUTO_RESIZE)
1632 cds_lfht_init_worker(flavor);
1633
1634 min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
1635 init_size = max(init_size, MIN_TABLE_SIZE);
1636 max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
1637 init_size = min(init_size, max_nr_buckets);
1638
1639 ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets);
1640 urcu_posix_assert(ht);
1641 urcu_posix_assert(ht->mm == mm);
1642 urcu_posix_assert(ht->bucket_at == mm->bucket_at);
1643
1644 ht->flags = flags;
1645 ht->flavor = flavor;
1646 ht->caller_resize_attr = attr;
1647 if (attr)
1648 ht->resize_attr = *attr;
1649 alloc_split_items_count(ht);
1650 /* this mutex should not nest in read-side C.S. */
1651 pthread_mutex_init(&ht->resize_mutex, NULL);
1652 order = cds_lfht_get_count_order_ulong(init_size);
1653 ht->resize_target = 1UL << order;
1654 cds_lfht_create_bucket(ht, 1UL << order);
1655 ht->size = 1UL << order;
1656 return ht;
1657 }
1658
1659 void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
1660 cds_lfht_match_fct match, const void *key,
1661 struct cds_lfht_iter *iter)
1662 {
1663 struct cds_lfht_node *node, *next, *bucket;
1664 unsigned long reverse_hash, size;
1665
1666 cds_lfht_iter_debug_set_ht(ht, iter);
1667
1668 reverse_hash = bit_reverse_ulong(hash);
1669
1670 size = rcu_dereference(ht->size);
1671 bucket = lookup_bucket(ht, size, hash);
1672 /* We can always skip the bucket node initially */
1673 node = rcu_dereference(bucket->next);
1674 node = clear_flag(node);
1675 for (;;) {
1676 if (caa_unlikely(is_end(node))) {
1677 node = next = NULL;
1678 break;
1679 }
1680 if (caa_unlikely(node->reverse_hash > reverse_hash)) {
1681 node = next = NULL;
1682 break;
1683 }
1684 next = rcu_dereference(node->next);
1685 urcu_posix_assert(node == clear_flag(node));
1686 if (caa_likely(!is_removed(next))
1687 && !is_bucket(next)
1688 && node->reverse_hash == reverse_hash
1689 && caa_likely(match(node, key))) {
1690 break;
1691 }
1692 node = clear_flag(next);
1693 }
1694 urcu_posix_assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
1695 iter->node = node;
1696 iter->next = next;
1697 }
1698
1699 void cds_lfht_next_duplicate(struct cds_lfht *ht __attribute__((unused)),
1700 cds_lfht_match_fct match,
1701 const void *key, struct cds_lfht_iter *iter)
1702 {
1703 struct cds_lfht_node *node, *next;
1704 unsigned long reverse_hash;
1705
1706 cds_lfht_iter_debug_assert(ht == iter->lfht);
1707 node = iter->node;
1708 reverse_hash = node->reverse_hash;
1709 next = iter->next;
1710 node = clear_flag(next);
1711
1712 for (;;) {
1713 if (caa_unlikely(is_end(node))) {
1714 node = next = NULL;
1715 break;
1716 }
1717 if (caa_unlikely(node->reverse_hash > reverse_hash)) {
1718 node = next = NULL;
1719 break;
1720 }
1721 next = rcu_dereference(node->next);
1722 if (caa_likely(!is_removed(next))
1723 && !is_bucket(next)
1724 && caa_likely(match(node, key))) {
1725 break;
1726 }
1727 node = clear_flag(next);
1728 }
1729 urcu_posix_assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
1730 iter->node = node;
1731 iter->next = next;
1732 }
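/*
 * Illustrative usage sketch: walk every node sharing the same key,
 * starting from a cds_lfht_lookup() result, within the same RCU read-side
 * critical section. "match_key" and "key" are placeholders as above.
 *
 *	cds_lfht_lookup(ht, hash, match_key, key, &iter);
 *	while ((node = cds_lfht_iter_get_node(&iter)) != NULL) {
 *		... handle one node with a matching key ...
 *		cds_lfht_next_duplicate(ht, match_key, key, &iter);
 *	}
 */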
1733
1734 void cds_lfht_next(struct cds_lfht *ht __attribute__((unused)),
1735 struct cds_lfht_iter *iter)
1736 {
1737 struct cds_lfht_node *node, *next;
1738
1739 cds_lfht_iter_debug_assert(ht == iter->lfht);
1740 node = clear_flag(iter->next);
1741 for (;;) {
1742 if (caa_unlikely(is_end(node))) {
1743 node = next = NULL;
1744 break;
1745 }
1746 next = rcu_dereference(node->next);
1747 if (caa_likely(!is_removed(next))
1748 && !is_bucket(next)) {
1749 break;
1750 }
1751 node = clear_flag(next);
1752 }
1753 urcu_posix_assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
1754 iter->node = node;
1755 iter->next = next;
1756 }
1757
1758 void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1759 {
1760 cds_lfht_iter_debug_set_ht(ht, iter);
1761 /*
1762 * Get next after first bucket node. The first bucket node is the
1763 * first node of the linked list.
1764 */
1765 iter->next = bucket_at(ht, 0)->next;
1766 cds_lfht_next(ht, iter);
1767 }
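/*
 * Illustrative usage sketch: full-table traversal with cds_lfht_first()
 * and cds_lfht_next(), under rcu_read_lock(). The cds_lfht_for_each()
 * and cds_lfht_for_each_entry() helpers from urcu/rculfhash.h wrap this
 * same pattern. Nodes are visited in reversed-bits-hash order.
 *
 *	struct cds_lfht_iter iter;
 *	struct cds_lfht_node *node;
 *
 *	rcu_read_lock();
 *	for (cds_lfht_first(ht, &iter);
 *			(node = cds_lfht_iter_get_node(&iter)) != NULL;
 *			cds_lfht_next(ht, &iter)) {
 *		... handle node ...
 *	}
 *	rcu_read_unlock();
 */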
1768
1769 void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
1770 struct cds_lfht_node *node)
1771 {
1772 unsigned long size;
1773
1774 node->reverse_hash = bit_reverse_ulong(hash);
1775 size = rcu_dereference(ht->size);
1776 _cds_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
1777 ht_count_add(ht, size, hash);
1778 }
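/*
 * Illustrative usage sketch: cds_lfht_add() never fails and allows
 * duplicate keys. It must be called with rcu_read_lock() held.
 * "struct mynode" and "compute_hash()" are placeholders.
 *
 *	struct mynode *n = malloc(sizeof(*n));
 *
 *	cds_lfht_node_init(&n->lfht_node);
 *	n->key = key;
 *	rcu_read_lock();
 *	cds_lfht_add(ht, compute_hash(key), &n->lfht_node);
 *	rcu_read_unlock();
 */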
1779
1780 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
1781 unsigned long hash,
1782 cds_lfht_match_fct match,
1783 const void *key,
1784 struct cds_lfht_node *node)
1785 {
1786 unsigned long size;
1787 struct cds_lfht_iter iter;
1788
1789 node->reverse_hash = bit_reverse_ulong(hash);
1790 size = rcu_dereference(ht->size);
1791 _cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
1792 if (iter.node == node)
1793 ht_count_add(ht, size, hash);
1794 return iter.node;
1795 }
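/*
 * Illustrative usage sketch: the return value of cds_lfht_add_unique()
 * must be checked. It is the newly added node on success, or the
 * pre-existing node with the same key when nothing was inserted.
 *
 *	struct cds_lfht_node *ret;
 *
 *	rcu_read_lock();
 *	ret = cds_lfht_add_unique(ht, hash, match_key, key, &n->lfht_node);
 *	rcu_read_unlock();
 *	if (ret != &n->lfht_node) {
 *		... key already present, n was not inserted ...
 *		free(n);	safe: n was never published
 *	}
 */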
1796
1797 struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
1798 unsigned long hash,
1799 cds_lfht_match_fct match,
1800 const void *key,
1801 struct cds_lfht_node *node)
1802 {
1803 unsigned long size;
1804 struct cds_lfht_iter iter;
1805
1806 node->reverse_hash = bit_reverse_ulong(hash);
1807 size = rcu_dereference(ht->size);
1808 for (;;) {
1809 _cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
1810 if (iter.node == node) {
1811 ht_count_add(ht, size, hash);
1812 return NULL;
1813 }
1814
1815 if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node))
1816 return iter.node;
1817 }
1818 }
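/*
 * Illustrative usage sketch: cds_lfht_add_replace() returns NULL when no
 * node with a matching key was present, or the node it replaced. The
 * replaced node may still be read by concurrent readers, so it must only
 * be reclaimed after a grace period. "rcu_head" member and
 * "free_mynode_rcu()" are caller-defined placeholders.
 *
 *	struct cds_lfht_node *old;
 *
 *	rcu_read_lock();
 *	old = cds_lfht_add_replace(ht, hash, match_key, key, &n->lfht_node);
 *	rcu_read_unlock();
 *	if (old)
 *		call_rcu(&caa_container_of(old, struct mynode,
 *				lfht_node)->rcu_head, free_mynode_rcu);
 */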
1819
1820 int cds_lfht_replace(struct cds_lfht *ht,
1821 struct cds_lfht_iter *old_iter,
1822 unsigned long hash,
1823 cds_lfht_match_fct match,
1824 const void *key,
1825 struct cds_lfht_node *new_node)
1826 {
1827 unsigned long size;
1828
1829 new_node->reverse_hash = bit_reverse_ulong(hash);
1830 if (!old_iter->node)
1831 return -ENOENT;
1832 if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
1833 return -EINVAL;
1834 if (caa_unlikely(!match(old_iter->node, key)))
1835 return -EINVAL;
1836 size = rcu_dereference(ht->size);
1837 return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
1838 new_node);
1839 }
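/*
 * Illustrative usage sketch: cds_lfht_replace() swaps a node found by a
 * previous lookup for a new node with the same key, atomically with
 * respect to concurrent lookups and traversals. It returns 0 on success,
 * -ENOENT if the old node is gone, -EINVAL on hash/key mismatch.
 *
 *	rcu_read_lock();
 *	cds_lfht_lookup(ht, hash, match_key, key, &iter);
 *	ret = cds_lfht_replace(ht, &iter, hash, match_key, key,
 *			&new_node->lfht_node);
 *	rcu_read_unlock();
 *	if (!ret)
 *		... old node may be reclaimed after a grace period ...
 */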
1840
1841 int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
1842 {
1843 unsigned long size;
1844 int ret;
1845
1846 size = rcu_dereference(ht->size);
1847 ret = _cds_lfht_del(ht, size, node);
1848 if (!ret) {
1849 unsigned long hash;
1850
1851 hash = bit_reverse_ulong(node->reverse_hash);
1852 ht_count_del(ht, size, hash);
1853 }
1854 return ret;
1855 }
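/*
 * Illustrative usage sketch: a successful cds_lfht_del() only logically
 * removes the node; memory reclaim must wait for a grace period. The
 * "rcu_head" member and "free_mynode_rcu()" are caller-defined
 * placeholders.
 *
 *	rcu_read_lock();
 *	ret = cds_lfht_del(ht, &n->lfht_node);
 *	rcu_read_unlock();
 *	if (!ret)
 *		call_rcu(&n->rcu_head, free_mynode_rcu);
 *	else
 *		... -ENOENT: already removed by a concurrent del/replace ...
 */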
1856
1857 int cds_lfht_is_node_deleted(const struct cds_lfht_node *node)
1858 {
1859 return is_removed(CMM_LOAD_SHARED(node->next));
1860 }
1861
1862 static
1863 bool cds_lfht_is_empty(struct cds_lfht *ht)
1864 {
1865 struct cds_lfht_node *node, *next;
1866 bool empty = true;
1867 bool was_online;
1868
1869 was_online = ht->flavor->read_ongoing();
1870 if (!was_online) {
1871 ht->flavor->thread_online();
1872 ht->flavor->read_lock();
1873 }
1874 /* Check that the table is empty */
1875 node = bucket_at(ht, 0);
1876 do {
1877 next = rcu_dereference(node->next);
1878 if (!is_bucket(next)) {
1879 empty = false;
1880 break;
1881 }
1882 node = clear_flag(next);
1883 } while (!is_end(node));
1884 if (!was_online) {
1885 ht->flavor->read_unlock();
1886 ht->flavor->thread_offline();
1887 }
1888 return empty;
1889 }
1890
1891 static
1892 int cds_lfht_delete_bucket(struct cds_lfht *ht)
1893 {
1894 struct cds_lfht_node *node;
1895 unsigned long order, i, size;
1896
1897 /* Check that the table is empty */
1898 node = bucket_at(ht, 0);
1899 do {
1900 node = clear_flag(node)->next;
1901 if (!is_bucket(node))
1902 return -EPERM;
1903 urcu_posix_assert(!is_removed(node));
1904 urcu_posix_assert(!is_removal_owner(node));
1905 } while (!is_end(node));
1906 /*
1907 * size accessed without rcu_dereference because hash table is
1908 * being destroyed.
1909 */
1910 size = ht->size;
1911 /* Internal sanity check: all nodes left should be buckets */
1912 for (i = 0; i < size; i++) {
1913 node = bucket_at(ht, i);
1914 dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
1915 i, i, bit_reverse_ulong(node->reverse_hash));
1916 urcu_posix_assert(is_bucket(node->next));
1917 }
1918
1919 for (order = cds_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
1920 cds_lfht_free_bucket_table(ht, order);
1921
1922 return 0;
1923 }
1924
1925 static
1926 void do_auto_resize_destroy_cb(struct urcu_work *work)
1927 {
1928 struct cds_lfht *ht = caa_container_of(work, struct cds_lfht, destroy_work);
1929 int ret;
1930
1931 ht->flavor->register_thread();
1932 ret = cds_lfht_delete_bucket(ht);
1933 if (ret)
1934 urcu_die(-ret);
1935 free_split_items_count(ht);
1936 ret = pthread_mutex_destroy(&ht->resize_mutex);
1937 if (ret)
1938 urcu_die(ret);
1939 ht->flavor->unregister_thread();
1940 poison_free(ht);
1941 }
1942
1943 /*
1944 * Should only be called when no more concurrent readers nor writers can
1945 * possibly access the table.
1946 */
1947 int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
1948 {
1949 int ret;
1950
1951 if (ht->flags & CDS_LFHT_AUTO_RESIZE) {
1952 /*
1953 * Perform error-checking for emptiness before queuing
1954 * work, so we can return error to the caller. This runs
1955 * concurrently with ongoing resize.
1956 */
1957 if (!cds_lfht_is_empty(ht))
1958 return -EPERM;
1959 /* Cancel ongoing resize operations. */
1960 _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
1961 if (attr) {
1962 *attr = ht->caller_resize_attr;
1963 ht->caller_resize_attr = NULL;
1964 }
1965 /*
1966 * Queue destroy work after prior queued resize
1967 * operations. Given there are no concurrent writers
1968 * accessing the hash table at this point, no resize
1969 * operations can be queued after this destroy work.
1970 */
1971 urcu_workqueue_queue_work(cds_lfht_workqueue,
1972 &ht->destroy_work, do_auto_resize_destroy_cb);
1973 return 0;
1974 }
1975 ret = cds_lfht_delete_bucket(ht);
1976 if (ret)
1977 return ret;
1978 free_split_items_count(ht);
1979 if (attr)
1980 *attr = ht->caller_resize_attr;
1981 ret = pthread_mutex_destroy(&ht->resize_mutex);
1982 if (ret)
1983 ret = -EBUSY;
1984 poison_free(ht);
1985 return ret;
1986 }
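/*
 * Illustrative teardown sketch: destruction requires an empty table and
 * that no reader or writer can still reach it. A typical sequence, under
 * those assumptions, deletes every node, waits for a grace period so the
 * deleted nodes can be reclaimed, then destroys the table.
 *
 *	... cds_lfht_del() every node, call_rcu() their reclaim ...
 *	synchronize_rcu();
 *	ret = cds_lfht_destroy(ht, NULL);
 *	if (ret)
 *		... -EPERM: table was not empty ...
 */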
1987
1988 void cds_lfht_count_nodes(struct cds_lfht *ht,
1989 long *approx_before,
1990 unsigned long *count,
1991 long *approx_after)
1992 {
1993 struct cds_lfht_node *node, *next;
1994 unsigned long nr_bucket = 0, nr_removed = 0;
1995
1996 *approx_before = 0;
1997 if (ht->split_count) {
1998 int i;
1999
2000 for (i = 0; i < split_count_mask + 1; i++) {
2001 *approx_before += uatomic_read(&ht->split_count[i].add);
2002 *approx_before -= uatomic_read(&ht->split_count[i].del);
2003 }
2004 }
2005
2006 *count = 0;
2007
2008 /* Count non-bucket nodes in the table */
2009 node = bucket_at(ht, 0);
2010 do {
2011 next = rcu_dereference(node->next);
2012 if (is_removed(next)) {
2013 if (!is_bucket(next))
2014 (nr_removed)++;
2015 else
2016 (nr_bucket)++;
2017 } else if (!is_bucket(next))
2018 (*count)++;
2019 else
2020 (nr_bucket)++;
2021 node = clear_flag(next);
2022 } while (!is_end(node));
2023 dbg_printf("number of logically removed nodes: %lu\n", nr_removed);
2024 dbg_printf("number of bucket nodes: %lu\n", nr_bucket);
2025 *approx_after = 0;
2026 if (ht->split_count) {
2027 int i;
2028
2029 for (i = 0; i < split_count_mask + 1; i++) {
2030 *approx_after += uatomic_read(&ht->split_count[i].add);
2031 *approx_after -= uatomic_read(&ht->split_count[i].del);
2032 }
2033 }
2034 }
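/*
 * Illustrative usage sketch: the split-counter approximations can drift;
 * *count is the number of live nodes seen by a full traversal at the time
 * of the walk. Call with rcu_read_lock() held.
 *
 *	long before, after;
 *	unsigned long count;
 *
 *	rcu_read_lock();
 *	cds_lfht_count_nodes(ht, &before, &count, &after);
 *	rcu_read_unlock();
 *	printf("approx before %ld, scanned %lu, approx after %ld\n",
 *		before, count, after);
 */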
2035
2036 /* called with resize mutex held */
2037 static
2038 void _do_cds_lfht_grow(struct cds_lfht *ht,
2039 unsigned long old_size, unsigned long new_size)
2040 {
2041 unsigned long old_order, new_order;
2042
2043 old_order = cds_lfht_get_count_order_ulong(old_size);
2044 new_order = cds_lfht_get_count_order_ulong(new_size);
2045 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
2046 old_size, old_order, new_size, new_order);
2047 urcu_posix_assert(new_size > old_size);
2048 init_table(ht, old_order + 1, new_order);
2049 }
2050
2051 /* called with resize mutex held */
2052 static
2053 void _do_cds_lfht_shrink(struct cds_lfht *ht,
2054 unsigned long old_size, unsigned long new_size)
2055 {
2056 unsigned long old_order, new_order;
2057
2058 new_size = max(new_size, MIN_TABLE_SIZE);
2059 old_order = cds_lfht_get_count_order_ulong(old_size);
2060 new_order = cds_lfht_get_count_order_ulong(new_size);
2061 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
2062 old_size, old_order, new_size, new_order);
2063 urcu_posix_assert(new_size < old_size);
2064
2065 /* Remove and unlink all bucket nodes to remove. */
2066 fini_table(ht, new_order + 1, old_order);
2067 }
2068
2069
2070 /* called with resize mutex held */
2071 static
2072 void _do_cds_lfht_resize(struct cds_lfht *ht)
2073 {
2074 unsigned long new_size, old_size;
2075
2076 /*
2077 * Resize table, re-do if the target size has changed under us.
2078 */
2079 do {
2080 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
2081 break;
2082 ht->resize_initiated = 1;
2083 old_size = ht->size;
2084 new_size = CMM_LOAD_SHARED(ht->resize_target);
2085 if (old_size < new_size)
2086 _do_cds_lfht_grow(ht, old_size, new_size);
2087 else if (old_size > new_size)
2088 _do_cds_lfht_shrink(ht, old_size, new_size);
2089 ht->resize_initiated = 0;
2090 /* write resize_initiated before read resize_target */
2091 cmm_smp_mb();
2092 } while (ht->size != CMM_LOAD_SHARED(ht->resize_target));
2093 }
2094
2095 static
2096 unsigned long resize_target_grow(struct cds_lfht *ht, unsigned long new_size)
2097 {
2098 return _uatomic_xchg_monotonic_increase(&ht->resize_target, new_size);
2099 }
2100
2101 static
2102 void resize_target_update_count(struct cds_lfht *ht,
2103 unsigned long count)
2104 {
2105 count = max(count, MIN_TABLE_SIZE);
2106 count = min(count, ht->max_nr_buckets);
2107 uatomic_set(&ht->resize_target, count);
2108 }
2109
2110 void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
2111 {
2112 resize_target_update_count(ht, new_size);
2113 CMM_STORE_SHARED(ht->resize_initiated, 1);
2114 mutex_lock(&ht->resize_mutex);
2115 _do_cds_lfht_resize(ht);
2116 mutex_unlock(&ht->resize_mutex);
2117 }
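/*
 * Illustrative usage sketch: cds_lfht_resize() performs the resize
 * synchronously in the calling thread, unlike the lazy, worker-based
 * resizes below. It takes ht->resize_mutex, which must not nest inside a
 * read-side C.S. (see the comment at mutex initialization).
 *
 *	cds_lfht_resize(ht, 4096);	grow (or shrink) toward 4096 buckets
 */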
2118
2119 static
2120 void do_resize_cb(struct urcu_work *work)
2121 {
2122 struct resize_work *resize_work =
2123 caa_container_of(work, struct resize_work, work);
2124 struct cds_lfht *ht = resize_work->ht;
2125
2126 ht->flavor->register_thread();
2127 mutex_lock(&ht->resize_mutex);
2128 _do_cds_lfht_resize(ht);
2129 mutex_unlock(&ht->resize_mutex);
2130 ht->flavor->unregister_thread();
2131 poison_free(work);
2132 }
2133
2134 static
2135 void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
2136 {
2137 struct resize_work *work;
2138
2139 /* Store resize_target before read resize_initiated */
2140 cmm_smp_mb();
2141 if (!CMM_LOAD_SHARED(ht->resize_initiated)) {
2142 if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
2143 return;
2144 }
2145 work = malloc(sizeof(*work));
2146 if (work == NULL) {
2147 dbg_printf("error allocating resize work, bailing out\n");
2148 return;
2149 }
2150 work->ht = ht;
2151 urcu_workqueue_queue_work(cds_lfht_workqueue,
2152 &work->work, do_resize_cb);
2153 CMM_STORE_SHARED(ht->resize_initiated, 1);
2154 }
2155 }
2156
2157 static
2158 void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
2159 {
2160 unsigned long target_size = size << growth;
2161
2162 target_size = min(target_size, ht->max_nr_buckets);
2163 if (resize_target_grow(ht, target_size) >= target_size)
2164 return;
2165
2166 __cds_lfht_resize_lazy_launch(ht);
2167 }
2168
2169 /*
2170 * We favor grow operations over shrink. A shrink operation never occurs
2171 * if a grow operation is queued for lazy execution. A grow operation
2172 * cancels any pending shrink lazy execution.
2173 */
2174 static
2175 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
2176 unsigned long count)
2177 {
2178 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
2179 return;
2180 count = max(count, MIN_TABLE_SIZE);
2181 count = min(count, ht->max_nr_buckets);
2182 if (count == size)
2183 return; /* Already the right size, no resize needed */
2184 if (count > size) { /* lazy grow */
2185 if (resize_target_grow(ht, count) >= count)
2186 return;
2187 } else { /* lazy shrink */
2188 for (;;) {
2189 unsigned long s;
2190
2191 s = uatomic_cmpxchg(&ht->resize_target, size, count);
2192 if (s == size)
2193 break; /* no resize needed */
2194 if (s > size)
2195 return; /* a grow is (or was just) in progress */
2196 if (s <= count)
2197 return; /* another thread already requested an equal or deeper shrink */
2198 size = s;
2199 }
2200 }
2201 __cds_lfht_resize_lazy_launch(ht);
2202 }
2203
2204 static void cds_lfht_before_fork(void *priv __attribute__((unused)))
2205 {
2206 if (cds_lfht_workqueue_atfork_nesting++)
2207 return;
2208 mutex_lock(&cds_lfht_fork_mutex);
2209 if (!cds_lfht_workqueue)
2210 return;
2211 urcu_workqueue_pause_worker(cds_lfht_workqueue);
2212 }
2213
2214 static void cds_lfht_after_fork_parent(void *priv __attribute__((unused)))
2215 {
2216 if (--cds_lfht_workqueue_atfork_nesting)
2217 return;
2218 if (!cds_lfht_workqueue)
2219 goto end;
2220 urcu_workqueue_resume_worker(cds_lfht_workqueue);
2221 end:
2222 mutex_unlock(&cds_lfht_fork_mutex);
2223 }
2224
2225 static void cds_lfht_after_fork_child(void *priv __attribute__((unused)))
2226 {
2227 if (--cds_lfht_workqueue_atfork_nesting)
2228 return;
2229 if (!cds_lfht_workqueue)
2230 goto end;
2231 urcu_workqueue_create_worker(cds_lfht_workqueue);
2232 end:
2233 mutex_unlock(&cds_lfht_fork_mutex);
2234 }
2235
2236 static struct urcu_atfork cds_lfht_atfork = {
2237 .before_fork = cds_lfht_before_fork,
2238 .after_fork_parent = cds_lfht_after_fork_parent,
2239 .after_fork_child = cds_lfht_after_fork_child,
2240 };
2241
2242 static void cds_lfht_init_worker(const struct rcu_flavor_struct *flavor)
2243 {
2244 flavor->register_rculfhash_atfork(&cds_lfht_atfork);
2245
2246 mutex_lock(&cds_lfht_fork_mutex);
2247 if (!cds_lfht_workqueue)
2248 cds_lfht_workqueue = urcu_workqueue_create(0, -1, NULL,
2249 NULL, NULL, NULL, NULL, NULL, NULL, NULL);
2250 mutex_unlock(&cds_lfht_fork_mutex);
2251 }
2252
2253 static void cds_lfht_exit(void)
2254 {
2255 mutex_lock(&cds_lfht_fork_mutex);
2256 if (cds_lfht_workqueue) {
2257 urcu_workqueue_flush_queued_work(cds_lfht_workqueue);
2258 urcu_workqueue_destroy(cds_lfht_workqueue);
2259 cds_lfht_workqueue = NULL;
2260 }
2261 mutex_unlock(&cds_lfht_fork_mutex);
2262 }