rculfhash: hold rcu read-side lock in resize
[urcu.git] / rculfhash.c
/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */

/*
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical section allows readers to perform hash
 *   table lookups and use the returned objects safely by delaying
 *   memory reclaim by a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures that no
 *   duplicate key exists.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of dummy nodes is kept. These dummy nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation for small tables only allows expanding the hash table.
 *   It is triggered automatically by detecting long chains in the add
 *   operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Per-CPU split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - Resize operation initiated by long chain detection is executed by a
 *   call_rcu thread, which keeps lock-freedom of add and remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. Only the thread whose removal
 *   operation successfully set the "removed" flag (with a cmpxchg) into
 *   a node's next pointer is considered to have succeeded its removal
 *   (and thus owns the node to reclaim). Because we garbage-collect
 *   starting from an invariant node (the start-of-bucket dummy node) up
 *   to the "removed" node (or find a reverse-hash that is higher), we
 *   are sure that a successful traversal of the chain leads to a chain
 *   that is present in the linked-list (the start node is never removed)
 *   and that it does not contain the "removed" node anymore, even if
 *   concurrent delete/add operations are changing the structure of the
 *   list concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with removed flag set in the bucket where it wants
 *   to add its new node. This ensures lock-freedom of the add operation
 *   by helping the remover unlink nodes from the list rather than
 *   waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "dummy node" tables.
 * - There is one dummy node table per hash index order. The size of
 *   each dummy node table is half the number of hashes contained in
 *   this order.
 * - call_rcu is used to garbage-collect the old order table.
 * - The per-order dummy node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
 *
 * A bit of ascii art explanation:
 *
 * Order index is off by one compared to the actual power of 2 because
 * we use index 0 to deal with the 0 special-case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 * 0  000        000
 * 4  100        001
 * 2  010        010
 * 6  110        011
 * 1  001        100
 * 5  101        101
 * 3  011        110
 * 7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order.
 *
 * order              bits       reverse
 * 0               0  000        000
 *                 |
 * 1               |  1  001        100             <-    <-
 *                 |  |                              |     |
 * 2               |  |  2  010        010           |     |
 *                 |  |  |  3  011        110    | <-|     |
 *                 |  |  |  |                    |   |     |
 * 3               -> |  |  |  4  100        001 |   |
 *                    -> |  |     5  101        101  |
 *                       -> |        6  110        011
 *                          ->          7  111        111
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sched.h>	/* sched_getcpu() */
#include <unistd.h>	/* sysconf() */
#include <poll.h>	/* poll() */
#include <pthread.h>

#include "config.h"
#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>

#ifdef DEBUG
#define dbg_printf(fmt, args...)	printf("[debug rculfhash] " fmt, ## args)
#else
#define dbg_printf(fmt, args...)
#endif

/* For testing */
#define POISON_FREE

/*
 * Per-CPU split-counters lazily update the global counter each 1024
 * addition/removal. It automatically keeps track of resize required.
 * We use the bucket length as indicator for need to expand for small
 * tables and machines lacking per-cpu data support.
 */
#define COUNT_COMMIT_ORDER		10
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3

/*
 * Define the minimum table size. Protects against hash table resize overload
 * when too many entries are added quickly before the resize can complete.
 * This is especially the case if the table could be shrunk to a size of 1.
 * TODO: we might want to make the add/remove operations help the resize to
 * add or remove dummy nodes when a resize is ongoing to ensure upper-bound on
 * chain length.
 */
#define MIN_TABLE_SIZE			128

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

/*
 * The removed flag needs to be updated atomically with the pointer.
 * The dummy flag does not require to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define DUMMY_FLAG		(1UL << 1)
#define FLAGS_MASK		((1UL << 2) - 1)

struct ht_items_count {
	unsigned long add, remove;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct rcu_level {
	struct rcu_head head;
	struct _cds_lfht_node nodes[0];
};

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct rcu_level *tbl[0];
};

struct cds_lfht {
	struct rcu_table *t;		/* shared */
	cds_lfht_hash_fct hash_fct;
	cds_lfht_compare_fct compare_fct;
	unsigned long hash_seed;
	int flags;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize, in_progress_destroy;
	void (*cds_lfht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
	void (*cds_lfht_synchronize_rcu)(void);
	void (*cds_lfht_rcu_read_lock)(void);
	void (*cds_lfht_rcu_read_unlock)(void);
	unsigned long count;		/* global approximate item count */
	struct ht_items_count *percpu_count;	/* per-cpu item count */
};

struct rcu_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
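
/*
 * Illustrative sketch (our own unused helper, not part of the upstream
 * API): reversing the hash bits turns "bucket splitting" into ordered
 * insertion. When the table doubles, the new dummy node for bucket
 * i + size/2 sorts between existing nodes, so no node ever needs to be
 * moved. The assertions below match the ascii art tables above.
 */
static __attribute__((unused))
void example_reverse_bit_order(void)
{
	/* Single-byte sanity checks. */
	assert(bit_reverse_u8(0x01) == 0x80);
	assert(bit_reverse_u8(0x80) == 0x01);
	/* Buckets 0,4,2,6,1 appear in this order in the split-ordered list. */
	assert(bit_reverse_ulong(0) < bit_reverse_ulong(4));
	assert(bit_reverse_ulong(4) < bit_reverse_ulong(2));
	assert(bit_reverse_ulong(2) < bit_reverse_ulong(6));
	assert(bit_reverse_ulong(6) < bit_reverse_ulong(1));
}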

/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}

int get_count_order_u32(uint32_t x)
{
	int order;

	order = fls_u32(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}

int get_count_order_ulong(unsigned long x)
{
	int order;

	order = fls_ulong(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}
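
/*
 * Illustrative sketch (our own unused helper, not part of the upstream
 * API): how a bucket index maps to its per-order dummy node table,
 * mirroring the arithmetic used by the add/remove/lookup paths below.
 * Order n covers bucket indexes [2^(n-1), 2^n), so each per-order table
 * holds half the hashes of its order (orders 0 and 1 each hold a single
 * entry).
 */
static __attribute__((unused))
void example_order_index_mapping(void)
{
	unsigned long index, order, slot;

	for (index = 0; index < 8; index++) {
		order = get_count_order_ulong(index + 1);
		slot = index & (!order ? 0 : ((1UL << (order - 1)) - 1));
		/* e.g. bucket 3 -> tbl[2] slot 1, bucket 4 -> tbl[3] slot 0 */
		dbg_printf("bucket %lu -> tbl[%lu] slot %lu\n",
			   index, order, slot);
	}
}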

#ifdef POISON_FREE
#define poison_free(ptr)				\
	do {						\
		memset(ptr, 0x42, sizeof(*(ptr)));	\
		free(ptr);				\
	} while (0)
#else
#define poison_free(ptr)	free(ptr)
#endif

static
void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we support hash table item accounting.
 * In the unfortunate event the number of CPUs reported would be
 * inaccurate, we use modulo arithmetic on the number of CPUs we got.
 */
#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
				unsigned long count);

static long nr_cpus_mask = -1;

static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
	struct ht_items_count *count;

	switch (nr_cpus_mask) {
	case -2:
		return NULL;
	case -1:
	{
		long maxcpus;

		maxcpus = sysconf(_SC_NPROCESSORS_CONF);
		if (maxcpus <= 0) {
			nr_cpus_mask = -2;
			return NULL;
		}
		/*
		 * round up number of CPUs to next power of two, so we
		 * can use & for modulo.
		 */
		maxcpus = 1UL << get_count_order_ulong(maxcpus);
		nr_cpus_mask = maxcpus - 1;
	}
		/* Fall-through */
	default:
		return calloc(nr_cpus_mask + 1, sizeof(*count));
	}
}

static
void free_per_cpu_items_count(struct ht_items_count *count)
{
	poison_free(count);
}

static
int ht_get_cpu(void)
{
	int cpu;

	assert(nr_cpus_mask >= 0);
	cpu = sched_getcpu();
	if (unlikely(cpu < 0))
		return cpu;
	else
		return cpu & nr_cpus_mask;
}

static
void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
{
	unsigned long percpu_count;
	int cpu;

	if (unlikely(!ht->percpu_count))
		return;
	cpu = ht_get_cpu();
	if (unlikely(cpu < 0))
		return;
	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
		unsigned long count;

		dbg_printf("add percpu %lu\n", percpu_count);
		count = uatomic_add_return(&ht->count,
				1UL << COUNT_COMMIT_ORDER);
		/* If power of 2 */
		if (!(count & (count - 1))) {
			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
					< t->size)
				return;
			dbg_printf("add set global %lu\n", count);
			cds_lfht_resize_lazy_count(ht, t,
				count >> (CHAIN_LEN_TARGET - 1));
		}
	}
}

static
void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
{
	unsigned long percpu_count;
	int cpu;

	if (unlikely(!ht->percpu_count))
		return;
	cpu = ht_get_cpu();
	if (unlikely(cpu < 0))
		return;
	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
		unsigned long count;

		dbg_printf("remove percpu %lu\n", percpu_count);
		count = uatomic_add_return(&ht->count,
				-(1UL << COUNT_COMMIT_ORDER));
		/* If power of 2 */
		if (!(count & (count - 1))) {
			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
					>= t->size)
				return;
			dbg_printf("remove set global %lu\n", count);
			cds_lfht_resize_lazy_count(ht, t,
				count >> (CHAIN_LEN_TARGET - 1));
		}
	}
}

#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

static const long nr_cpus_mask = -1;

static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
	return NULL;
}

static
void free_per_cpu_items_count(struct ht_items_count *count)
{
}

static
void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
{
}

static
void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
{
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
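
/*
 * Illustrative sketch (our own unused helper, not part of the upstream
 * API): the split-counter scheme above only touches the global
 * ht->count once every 2^COUNT_COMMIT_ORDER (1024) operations on a
 * given cpu's counter, and a resize is only considered when the
 * committed global count reaches a power of two. This predicate is the
 * commit test used by ht_count_add and ht_count_remove.
 */
static __attribute__((unused))
int example_count_commit_needed(unsigned long percpu_count)
{
	/* True once every 1024 updates of this cpu's counter. */
	return !(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1));
}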

static
void check_resize(struct cds_lfht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	unsigned long count;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = uatomic_read(&ht->count);
	/*
	 * Use bucket-local length for small table expand and for
	 * environments lacking per-cpu data support.
	 */
	if (count >= (1UL << COUNT_COMMIT_ORDER))
		return;
	if (chain_len > 100)
		dbg_printf("WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		cds_lfht_resize_lazy(ht, t,
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}

static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_dummy(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}
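
/*
 * Illustrative sketch (our own unused helper, not part of the upstream
 * API): the REMOVED and DUMMY low bits ride on the next pointer, so a
 * single cmpxchg on that pointer updates the link and the removed
 * state atomically, while clear_flag() recovers the real successor for
 * traversal.
 */
static __attribute__((unused))
void example_pointer_flags(void)
{
	struct cds_lfht_node *p = (struct cds_lfht_node *) 0x1000;

	assert(!is_removed(p));
	assert(is_removed(flag_removed(p)));
	/* The original pointer is recoverable for the physical unlink. */
	assert(clear_flag(flag_removed(p)) == p);
	/* Dummy and removed flags are independent bits. */
	assert(is_dummy(flag_dummy(p)) && !is_removed(flag_dummy(p)));
}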

static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}

static
void cds_lfht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	poison_free(t);
}

static
void cds_lfht_free_level(struct rcu_head *head)
{
	struct rcu_level *l =
		caa_container_of(head, struct rcu_level, head);
	poison_free(l);
}

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
	/* For testing: ring buffer tracing the traversal steps */
	struct cds_lfht_node *iter_trace[64];
	unsigned long trace_idx = 0;

	memset(iter_trace, 0, sizeof(iter_trace));
	assert(!is_dummy(dummy));
	assert(!is_removed(dummy));
	assert(!is_dummy(node));
	assert(!is_removed(node));
	for (;;) {
		iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x1;
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		iter_trace[trace_idx++ & (64 - 1)] = iter;
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		/*
		 * We should never be called with dummy (start of chain)
		 * and logically removed node (end of path compression
		 * marker) being the actual same node. This would be a
		 * bug in the algorithm implementation.
		 */
		assert(dummy != node);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				return;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (likely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
			iter_trace[trace_idx++ & (64 - 1)] = iter;
		}
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x2;
	}
}

static
struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
				    struct cds_lfht_node *node, int unique, int dummy)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*dummy_node;
	struct _cds_lfht_node *lookup;
	unsigned long hash, index, order;

	assert(!is_dummy(node));
	assert(!is_removed(node));
	if (!t->size) {
		assert(dummy);
		node->p.next = flag_dummy(NULL);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		index = hash & (t->size - 1);
		order = get_count_order_ulong(index + 1);
		lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
		iter_prev = (struct cds_lfht_node *) lookup;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			/* TODO: check if removed */
			if (unlikely(!clear_flag(iter)))
				goto insert;
			/* TODO: check if removed */
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (unlikely(is_removed(next)))
				goto gc_node;
			if (unique
			    && !is_dummy(next)
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
			    && !is_dummy(next))
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removed(iter));
		assert(iter_prev != node);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
	dummy_node = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy_node, node);
	return node;
}

static
int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
		     struct cds_lfht_node *node, int dummy_removal)
{
	struct cds_lfht_node *dummy, *next, *old;
	struct _cds_lfht_node *lookup;
	int flagged = 0;
	unsigned long hash, index, order;

	/* logically delete the node */
	assert(!is_dummy(node));
	assert(!is_removed(node));
	old = rcu_dereference(node->p.next);
	do {
		next = old;
		if (unlikely(is_removed(next)))
			goto end;
		if (dummy_removal)
			assert(is_dummy(next));
		else
			assert(!is_dummy(next));
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	assert(t->size > 0);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
	dummy = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else
		return -ENOENT;
}

/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 */
static
void init_table(struct cds_lfht *ht, struct rcu_table *t,
		unsigned long first_order, unsigned long len_order)
{
	unsigned long i, end_order;

	dbg_printf("init table: first_order %lu end_order %lu\n",
		   first_order, first_order + len_order);
	end_order = first_order + len_order;
	t->size = !first_order ? 0 : (1UL << (first_order - 1));
	for (i = first_order; i < end_order; i++) {
		unsigned long j, len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("init order %lu len: %lu\n", i, len);
		t->tbl[i] = calloc(1, sizeof(struct rcu_level)
				+ (len * sizeof(struct _cds_lfht_node)));
		ht->cds_lfht_rcu_read_lock();
		for (j = 0; j < len; j++) {
			struct cds_lfht_node *new_node =
				(struct cds_lfht_node *) &t->tbl[i]->nodes[j];

			dbg_printf("init entry: i %lu j %lu hash %lu\n",
				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
			new_node->p.reverse_hash =
				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
			(void) _cds_lfht_add(ht, t, new_node, 0, 1);
			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
				break;
		}
		ht->cds_lfht_rcu_read_unlock();
		/* Update table size */
		t->size = !i ? 1 : (1UL << i);
		dbg_printf("init new size: %lu\n", t->size);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
	t->resize_target = t->size;
	t->resize_initiated = 0;
}

/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 */
static
void fini_table(struct cds_lfht *ht, struct rcu_table *t,
		unsigned long first_order, unsigned long len_order)
{
	long i, end_order;

	dbg_printf("fini table: first_order %lu end_order %lu\n",
		   first_order, first_order + len_order);
	end_order = first_order + len_order;
	assert(first_order > 0);
	assert(t->size == (1UL << (end_order - 1)));
	for (i = end_order - 1; i >= first_order; i--) {
		unsigned long j, len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("fini order %ld len: %lu\n", i, len);
		/*
		 * Update table size. Need to shrink this table prior to
		 * removal so gc lookups use non-logically-removed dummy
		 * nodes.
		 */
		t->size = 1UL << (i - 1);
		/* Unlink */
		ht->cds_lfht_rcu_read_lock();
		for (j = 0; j < len; j++) {
			struct cds_lfht_node *fini_node =
				(struct cds_lfht_node *) &t->tbl[i]->nodes[j];

			dbg_printf("fini entry: i %ld j %lu hash %lu\n",
				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
			fini_node->p.reverse_hash =
				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
			(void) _cds_lfht_remove(ht, t, fini_node, 1);
			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
				break;
		}
		ht->cds_lfht_rcu_read_unlock();
		ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
		dbg_printf("fini new size: %lu\n", t->size);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
	t->resize_target = t->size;
	t->resize_initiated = 0;
}

struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
			cds_lfht_compare_fct compare_fct,
			unsigned long hash_seed,
			unsigned long init_size,
			int flags,
			void (*cds_lfht_call_rcu)(struct rcu_head *head,
					void (*func)(struct rcu_head *head)),
			void (*cds_lfht_synchronize_rcu)(void),
			void (*cds_lfht_rcu_read_lock)(void),
			void (*cds_lfht_rcu_read_unlock)(void))
{
	struct cds_lfht *ht;
	unsigned long order;

	/* init_size must be power of two */
	if (init_size && (init_size & (init_size - 1)))
		return NULL;
	ht = calloc(1, sizeof(struct cds_lfht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
	ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
	ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
	ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
	ht->in_progress_resize = 0;
	ht->percpu_count = alloc_per_cpu_items_count();
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (order * sizeof(struct rcu_level *)));
	ht->t->size = 0;
	ht->flags = flags;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, order);
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}
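
/*
 * Illustrative usage sketch (hypothetical caller; the hash and compare
 * callbacks are ours, while call_rcu, synchronize_rcu, rcu_read_lock
 * and rcu_read_unlock come from whichever urcu flavor the application
 * links against):
 *
 *	struct cds_lfht *ht;
 *
 *	ht = cds_lfht_new(my_hash_fct, my_compare_fct, 0x42UL,
 *			0, CDS_LFHT_AUTO_RESIZE,
 *			call_rcu, synchronize_rcu,
 *			rcu_read_lock, rcu_read_unlock);
 */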

struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long hash, reverse_hash, index, order;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
		   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
	node = (struct cds_lfht_node *) lookup;
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}
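
/*
 * Illustrative usage sketch (hypothetical caller): lookups must run
 * inside an RCU read-side critical section, and the returned node may
 * only be dereferenced within it:
 *
 *	rcu_read_lock();
 *	node = cds_lfht_lookup(ht, key, key_len);
 *	if (node)
 *		process(caa_container_of(node, struct my_entry, node));
 *	rcu_read_unlock();
 */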

struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	struct cds_lfht_node *next;
	unsigned long reverse_hash;
	void *key;
	size_t key_len;

	reverse_hash = node->p.reverse_hash;
	key = node->key;
	key_len = node->key_len;
	next = rcu_dereference(node->p.next);
	node = clear_flag(next);

	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}

void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _cds_lfht_add(ht, t, node, 0, 0);
	ht_count_add(ht, t);
}

struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;
	struct cds_lfht_node *ret;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	ret = _cds_lfht_add(ht, t, node, 1, 0);
	/* Only count the add when the node was actually inserted. */
	if (ret == node)
		ht_count_add(ht, t);
	return ret;
}
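
/*
 * Illustrative usage sketch (hypothetical caller): add_unique either
 * inserts the node and returns it, or returns the already-present node
 * holding a matching key, so a duplicate insert is detected by
 * comparing the return value against the node passed in:
 *
 *	ret = cds_lfht_add_unique(ht, &entry->node);
 *	if (ret != &entry->node)
 *		handle_duplicate(entry);
 */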

int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	struct rcu_table *t;
	int ret;

	t = rcu_dereference(ht->t);
	ret = _cds_lfht_remove(ht, t, node, 0);
	if (!ret)
		ht_count_remove(ht, t);
	return ret;
}

static
int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
	struct rcu_table *t;
	struct cds_lfht_node *node;
	struct _cds_lfht_node *lookup;
	unsigned long order, i;

	t = ht->t;
	/* Check that the table is empty */
	lookup = &t->tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (clear_flag(node));
	/* Internal sanity check: all nodes left should be dummy */
	for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
		unsigned long len;

		len = !order ? 1 : 1UL << (order - 1);
		for (i = 0; i < len; i++) {
			dbg_printf("delete order %lu i %lu hash %lu\n",
				order, i,
				bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
			assert(is_dummy(t->tbl[order]->nodes[i].next));
		}
		poison_free(t->tbl[order]);
	}
	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int cds_lfht_destroy(struct cds_lfht *ht)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = cds_lfht_delete_dummy(ht);
	if (ret)
		return ret;
	poison_free(ht->t);
	free_per_cpu_items_count(ht->percpu_count);
	poison_free(ht);
	return ret;
}

void cds_lfht_count_nodes(struct cds_lfht *ht,
		unsigned long *count,
		unsigned long *removed)
{
	struct rcu_table *t;
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long nr_dummy = 0;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Count non-dummy nodes in the table */
	lookup = &t->tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			(nr_dummy)++;
		node = clear_flag(next);
	} while (node);
	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
}

/* called with resize mutex held */
static
void _do_cds_lfht_grow(struct cds_lfht *ht, struct rcu_table *old_t,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;
	struct rcu_table *new_t;

	old_order = get_count_order_ulong(old_size) + 1;
	new_order = get_count_order_ulong(new_size) + 1;
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	new_t = malloc(sizeof(struct rcu_table)
			+ (new_order * sizeof(struct rcu_level *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_order * sizeof(struct rcu_level *));
	init_table(ht, new_t, old_order, new_order - old_order);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
}
/* called with resize mutex held */
static
void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;
	struct rcu_table *new_t;

	new_size = max(new_size, MIN_TABLE_SIZE);
	old_order = get_count_order_ulong(old_size) + 1;
	new_order = get_count_order_ulong(new_size) + 1;
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	new_t = malloc(sizeof(struct rcu_table)
			+ (new_order * sizeof(struct rcu_level *)));
	assert(new_size < old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       new_order * sizeof(struct rcu_level *));
	new_t->size = !new_order ? 1 : (1UL << (new_order - 1));
	assert(new_t->size == new_size);
	new_t->resize_target = new_t->size;
	new_t->resize_initiated = 0;

	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);

	/*
	 * We need to wait for all add operations to reach Q.S. (and
	 * thus use the new table for lookups) before we can start
	 * releasing the old dummy nodes. Otherwise their lookup will
	 * return a logically removed node as insert position.
	 */
	ht->cds_lfht_synchronize_rcu();

	/* Unlink and remove all now-unused dummy node pointers. */
	fini_table(ht, old_t, new_order, old_order - new_order);
	ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
}

/* called with resize mutex held */
static
void _do_cds_lfht_resize(struct cds_lfht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *old_t;

	old_t = ht->t;
	old_size = old_t->size;
	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	if (old_size < new_size)
		_do_cds_lfht_grow(ht, old_t, old_size, new_size);
	else if (old_size > new_size)
		_do_cds_lfht_shrink(ht, old_t, old_size, new_size);
	else
		CMM_STORE_SHARED(old_t->resize_initiated, 0);
}

static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

static
void resize_target_update_count(struct rcu_table *t,
				unsigned long count)
{
	count = max(count, MIN_TABLE_SIZE);
	uatomic_set(&t->resize_target, count);
}

void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
	struct rcu_table *t = rcu_dereference(ht->t);

	resize_target_update_count(t, new_size);
	CMM_STORE_SHARED(t->resize_initiated, 1);
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct cds_lfht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	poison_free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}

static
void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}

#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
				unsigned long count)
{
	struct rcu_resize_work *work;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	resize_target_update_count(t, count);
	if (!CMM_LOAD_SHARED(t->resize_initiated)) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}

#endif