X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=034c7f9053cfbd881496b592cbb3bcddf92a1d8f;hp=6ac53ea5bae5b1f3fea8ec1ecf98f5939110f25e;hb=adc0de68ccd78689659ed49d4b3d5d36c6720e20;hpb=9ee0fc9a6151f828ea815b5ccca4e80f6587209b diff --git a/rculfhash.c b/rculfhash.c index 6ac53ea..034c7f9 100644 --- a/rculfhash.c +++ b/rculfhash.c @@ -177,6 +177,12 @@ #define MAX_TABLE_ORDER 64 #endif +/* + * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink. + */ +#define MIN_PARTITION_PER_THREAD_ORDER 12 +#define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER) + #ifndef min #define min(a, b) ((a) < (b) ? (a) : (b)) #endif @@ -187,18 +193,22 @@ /* * The removed flag needs to be updated atomically with the pointer. + * It indicates that no node must attach to the node scheduled for + * removal. The gc flag also needs to be updated atomically with the + * pointer. It indicates that node garbage collection must be performed. * The dummy flag does not require to be updated atomically with the * pointer, but it is added as a pointer low bit flag to save space. */ #define REMOVED_FLAG (1UL << 0) -#define DUMMY_FLAG (1UL << 1) -#define FLAGS_MASK ((1UL << 2) - 1) +#define GC_FLAG (1UL << 1) +#define DUMMY_FLAG (1UL << 2) +#define FLAGS_MASK ((1UL << 3) - 1) /* Value of the end pointer. Should not interact with flags. */ #define END_VALUE NULL struct ht_items_count { - unsigned long add, remove; + unsigned long add, del; } __attribute__((aligned(CAA_CACHE_LINE_SIZE))); struct rcu_level { @@ -235,6 +245,9 @@ struct cds_lfht { void (*cds_lfht_rcu_read_unlock)(void); void (*cds_lfht_rcu_thread_offline)(void); void (*cds_lfht_rcu_thread_online)(void); + void (*cds_lfht_rcu_register_thread)(void); + void (*cds_lfht_rcu_unregister_thread)(void); + pthread_attr_t *resize_attr; /* Resize threads attributes */ unsigned long count; /* global approximate item count */ struct ht_items_count *percpu_count; /* per-cpu item count */ }; @@ -244,11 +257,30 @@ struct rcu_resize_work { struct cds_lfht *ht; }; +struct partition_resize_work { + struct rcu_head head; + struct cds_lfht *ht; + unsigned long i, start, len; + void (*fct)(struct cds_lfht *ht, unsigned long i, + unsigned long start, unsigned long len); +}; + +enum add_mode { + ADD_DEFAULT = 0, + ADD_UNIQUE = 1, + ADD_REPLACE = 2, +}; + static struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, unsigned long size, struct cds_lfht_node *node, - int unique, int dummy); + enum add_mode mode, int dummy); + +static +int _cds_lfht_del(struct cds_lfht *ht, unsigned long size, + struct cds_lfht_node *node, + int dummy_removal, int do_gc); /* * Algorithm to reverse bits in a word by lookup table, extended to @@ -548,7 +580,7 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size) } static -void ht_count_remove(struct cds_lfht *ht, unsigned long size) +void ht_count_del(struct cds_lfht *ht, unsigned long size) { unsigned long percpu_count; int cpu; @@ -558,18 +590,18 @@ void ht_count_remove(struct cds_lfht *ht, unsigned long size) cpu = ht_get_cpu(); if (unlikely(cpu < 0)) return; - percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1); + percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, -1); if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) { unsigned long count; - dbg_printf("remove percpu %lu\n", percpu_count); + dbg_printf("del percpu %lu\n", percpu_count); count = uatomic_add_return(&ht->count, -(1UL << COUNT_COMMIT_ORDER)); /* If power of 2 */ if (!(count 
& (count - 1))) { if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size) return; - dbg_printf("remove set global %lu\n", count); + dbg_printf("del set global %lu\n", count); cds_lfht_resize_lazy_count(ht, size, count >> (CHAIN_LEN_TARGET - 1)); } @@ -597,7 +629,7 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size) } static -void ht_count_remove(struct cds_lfht *ht, unsigned long size) +void ht_count_del(struct cds_lfht *ht, unsigned long size) { } @@ -644,6 +676,18 @@ struct cds_lfht_node *flag_removed(struct cds_lfht_node *node) return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG); } +static +int is_gc(struct cds_lfht_node *node) +{ + return ((unsigned long) node) & GC_FLAG; +} + +static +struct cds_lfht_node *flag_gc(struct cds_lfht_node *node) +{ + return (struct cds_lfht_node *) (((unsigned long) node) | GC_FLAG); +} + static int is_dummy(struct cds_lfht_node *node) { @@ -699,8 +743,10 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node struct cds_lfht_node *iter_prev, *iter, *next, *new_next; assert(!is_dummy(dummy)); + assert(!is_gc(dummy)); assert(!is_removed(dummy)); assert(!is_dummy(node)); + assert(!is_gc(node)); assert(!is_removed(node)); for (;;) { iter_prev = dummy; @@ -720,16 +766,18 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) return; next = rcu_dereference(clear_flag(iter)->p.next); - if (likely(is_removed(next))) + if (likely(is_gc(next))) break; iter_prev = clear_flag(iter); iter = next; } - assert(!is_removed(iter)); + assert(!is_gc(iter)); if (is_dummy(iter)) new_next = flag_dummy(clear_flag(next)); else new_next = clear_flag(next); + if (is_removed(iter)) + new_next = flag_removed(new_next); (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next); } return; @@ -739,14 +787,15 @@ static struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, unsigned long size, struct cds_lfht_node *node, - int unique, int dummy) + enum add_mode mode, int dummy) { struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next, - *dummy_node; + *dummy_node, *return_node; struct _cds_lfht_node *lookup; unsigned long hash, index, order; assert(!is_dummy(node)); + assert(!is_gc(node)); assert(!is_removed(node)); if (!size) { assert(dummy); @@ -774,14 +823,19 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) goto insert; next = rcu_dereference(clear_flag(iter)->p.next); - if (unlikely(is_removed(next))) + if (unlikely(is_gc(next))) goto gc_node; - if (unique + assert(!is_removed(next)); + if ((mode == ADD_UNIQUE || mode == ADD_REPLACE) && !is_dummy(next) && !ht->compare_fct(node->key, node->key_len, clear_flag(iter)->key, - clear_flag(iter)->key_len)) - return clear_flag(iter); + clear_flag(iter)->key_len)) { + if (mode == ADD_UNIQUE) + return clear_flag(iter); + else /* mode == ADD_REPLACE */ + goto replace; + } /* Only account for identical reverse hash once */ if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash && !is_dummy(next)) @@ -789,10 +843,13 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, iter_prev = clear_flag(iter); iter = next; } + insert: assert(node != clear_flag(iter)); assert(!is_removed(iter_prev)); assert(!is_removed(iter)); + assert(!is_gc(iter_prev)); + assert(!is_gc(iter)); assert(iter_prev != node); if (!dummy) node->p.next = clear_flag(iter); @@ -803,12 +860,55 @@ struct cds_lfht_node 
*_cds_lfht_add(struct cds_lfht *ht,
 	else
 		new_node = node;
 	if (uatomic_cmpxchg(&iter_prev->p.next, iter,
-			new_node) != iter)
+			new_node) != iter) {
 		continue;	/* retry */
+	} else {
+		if (mode == ADD_REPLACE)
+			return_node = NULL;
+		else /* ADD_DEFAULT and ADD_UNIQUE */
+			return_node = node;
+		goto gc_end;
+	}
+
+	replace:
+	/* Insert after node to be replaced */
+	iter_prev = clear_flag(iter);
+	iter = next;
+	assert(node != clear_flag(iter));
+	assert(!is_removed(iter_prev));
+	assert(!is_removed(iter));
+	assert(!is_gc(iter_prev));
+	assert(!is_gc(iter));
+	assert(iter_prev != node);
+	assert(!dummy);
+	node->p.next = clear_flag(iter);
+	if (is_dummy(iter))
+		new_node = flag_dummy(node);
 	else
+		new_node = node;
+	/*
+	 * Here is the whole trick for lock-free replace: we add
+	 * the replacement node _after_ the node we want to
+	 * replace by atomically setting its next pointer at the
+	 * same time we set its removal and gc flags. Given that
+	 * the lookups/get next use an iterator aware of the
+	 * next pointer, they will either skip the old node due
+	 * to the removal/gc flag and see the new node, or use
+	 * the old node, but will not see the new one.
+	 */
+	new_node = flag_removed(new_node);
+	new_node = flag_gc(new_node);
+	if (uatomic_cmpxchg(&iter_prev->p.next,
+			iter, new_node) != iter) {
+		continue;	/* retry */
+	} else {
+		return_node = iter_prev;
 		goto gc_end;
+	}
+
 gc_node:
 	assert(!is_removed(iter));
+	assert(!is_gc(iter));
 	if (is_dummy(iter))
 		new_next = flag_dummy(clear_flag(next));
 	else
@@ -823,13 +923,13 @@ gc_end:
 	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
 	dummy_node = (struct cds_lfht_node *) lookup;
 	_cds_lfht_gc_bucket(dummy_node, node);
-	return node;
+	return return_node;
 }
 
 static
-int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size,
+int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 		struct cds_lfht_node *node,
-		int dummy_removal)
+		int dummy_removal, int do_gc)
 {
 	struct cds_lfht_node *dummy, *next, *old;
 	struct _cds_lfht_node *lookup;
@@ -838,9 +938,12 @@ int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size,
 
 	/* logically delete the node */
 	assert(!is_dummy(node));
+	assert(!is_gc(node));
 	assert(!is_removed(node));
 	old = rcu_dereference(node->p.next);
 	do {
+		struct cds_lfht_node *new_next;
+
 		next = old;
 		if (unlikely(is_removed(next)))
			goto end;
@@ -848,13 +951,18 @@ int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size,
 			assert(is_dummy(next));
 		else
 			assert(!is_dummy(next));
-		old = uatomic_cmpxchg(&node->p.next, next,
-				flag_removed(next));
+		new_next = flag_removed(next);
+		if (do_gc)
+			new_next = flag_gc(new_next);
+		old = uatomic_cmpxchg(&node->p.next, next, new_next);
 	} while (old != next);
 
 	/* We performed the (logical) deletion. 
*/ flagged = 1; + if (!do_gc) + goto end; + /* * Ensure that the node is not visible to readers anymore: lookup for * the node, and remove it (along with any other logically removed node) @@ -879,25 +987,77 @@ end: return -ENOENT; } +static +void *partition_resize_thread(void *arg) +{ + struct partition_resize_work *work = arg; + + work->ht->cds_lfht_rcu_register_thread(); + work->fct(work->ht, work->i, work->start, work->len); + work->ht->cds_lfht_rcu_unregister_thread(); + return NULL; +} + +static +void partition_resize_helper(struct cds_lfht *ht, unsigned long i, + unsigned long len, + void (*fct)(struct cds_lfht *ht, unsigned long i, + unsigned long start, unsigned long len)) +{ + unsigned long partition_len; + struct partition_resize_work *work; + int thread, ret; + unsigned long nr_threads; + pthread_t *thread_id; + + /* + * Note: nr_cpus_mask + 1 is always power of 2. + * We spawn just the number of threads we need to satisfy the minimum + * partition size, up to the number of CPUs in the system. + */ + nr_threads = min(nr_cpus_mask + 1, + len >> MIN_PARTITION_PER_THREAD_ORDER); + partition_len = len >> get_count_order_ulong(nr_threads); + work = calloc(nr_threads, sizeof(*work)); + thread_id = calloc(nr_threads, sizeof(*thread_id)); + assert(work); + for (thread = 0; thread < nr_threads; thread++) { + work[thread].ht = ht; + work[thread].i = i; + work[thread].len = partition_len; + work[thread].start = thread * partition_len; + work[thread].fct = fct; + ret = pthread_create(&thread_id[thread], ht->resize_attr, + partition_resize_thread, &work[thread]); + assert(!ret); + } + for (thread = 0; thread < nr_threads; thread++) { + ret = pthread_join(thread_id[thread], NULL); + assert(!ret); + } + free(work); + free(thread_id); +} + /* * Holding RCU read lock to protect _cds_lfht_add against memory * reclaim that could be performed by other call_rcu worker threads (ABA * problem). * - * TODO: when we reach a certain length, we can split this population phase over + * When we reach a certain length, we can split this population phase over * many worker threads, based on the number of CPUs available in the system. * This should therefore take care of not having the expand lagging behind too * many concurrent insertion threads by using the scheduler's ability to * schedule dummy node population fairly with insertions. */ static -void init_table_populate(struct cds_lfht *ht, unsigned long i, unsigned long len) +void init_table_populate_partition(struct cds_lfht *ht, unsigned long i, + unsigned long start, unsigned long len) { unsigned long j; - ht->cds_lfht_rcu_thread_online(); ht->cds_lfht_rcu_read_lock(); - for (j = 0; j < len; j++) { + for (j = start; j < start + len; j++) { struct cds_lfht_node *new_node = (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; @@ -906,12 +1066,25 @@ void init_table_populate(struct cds_lfht *ht, unsigned long i, unsigned long len new_node->p.reverse_hash = bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); (void) _cds_lfht_add(ht, !i ? 
0 : (1UL << (i - 1)), - new_node, 0, 1); + new_node, ADD_DEFAULT, 1); if (CMM_LOAD_SHARED(ht->in_progress_destroy)) break; } ht->cds_lfht_rcu_read_unlock(); - ht->cds_lfht_rcu_thread_offline(); +} + +static +void init_table_populate(struct cds_lfht *ht, unsigned long i, + unsigned long len) +{ + assert(nr_cpus_mask != -1); + if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) { + ht->cds_lfht_rcu_thread_online(); + init_table_populate_partition(ht, i, 0, len); + ht->cds_lfht_rcu_thread_offline(); + return; + } + partition_resize_helper(ht, i, len, init_table_populate_partition); } static @@ -935,6 +1108,7 @@ void init_table(struct cds_lfht *ht, ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level) + (len * sizeof(struct _cds_lfht_node))); + assert(ht->t.tbl[i]); /* * Set all dummy nodes reverse hash values for a level and @@ -974,19 +1148,19 @@ void init_table(struct cds_lfht *ht, * Logical removal and garbage collection can therefore be done in batch or on a * node-per-node basis, as long as the guarantee above holds. * - * TODO: when we reach a certain length, we can split this removal over many - * worker threads, based on the number of CPUs available in the system. This - * should take care of not letting resize process lag behind too many concurrent + * When we reach a certain length, we can split this removal over many worker + * threads, based on the number of CPUs available in the system. This should + * take care of not letting resize process lag behind too many concurrent * updater threads actively inserting into the hash table. */ static -void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len) +void remove_table_partition(struct cds_lfht *ht, unsigned long i, + unsigned long start, unsigned long len) { unsigned long j; - ht->cds_lfht_rcu_thread_online(); ht->cds_lfht_rcu_read_lock(); - for (j = 0; j < len; j++) { + for (j = start; j < start + len; j++) { struct cds_lfht_node *fini_node = (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; @@ -994,13 +1168,26 @@ void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len) i, j, !i ? 0 : (1UL << (i - 1)) + j); fini_node->p.reverse_hash = bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); - (void) _cds_lfht_remove(ht, !i ? 0 : (1UL << (i - 1)), - fini_node, 1); + (void) _cds_lfht_del(ht, !i ? 
0 : (1UL << (i - 1)), + fini_node, 1, 1); if (CMM_LOAD_SHARED(ht->in_progress_destroy)) break; } ht->cds_lfht_rcu_read_unlock(); - ht->cds_lfht_rcu_thread_offline(); +} + +static +void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len) +{ + + assert(nr_cpus_mask != -1); + if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) { + ht->cds_lfht_rcu_thread_online(); + remove_table_partition(ht, i, 0, len); + ht->cds_lfht_rcu_thread_offline(); + return; + } + partition_resize_helper(ht, i, len, remove_table_partition); } static @@ -1050,7 +1237,7 @@ void fini_table(struct cds_lfht *ht, } } -struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct, +struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct, cds_lfht_compare_fct compare_fct, unsigned long hash_seed, unsigned long init_size, @@ -1061,7 +1248,10 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct, void (*cds_lfht_rcu_read_lock)(void), void (*cds_lfht_rcu_read_unlock)(void), void (*cds_lfht_rcu_thread_offline)(void), - void (*cds_lfht_rcu_thread_online)(void)) + void (*cds_lfht_rcu_thread_online)(void), + void (*cds_lfht_rcu_register_thread)(void), + void (*cds_lfht_rcu_unregister_thread)(void), + pthread_attr_t *attr) { struct cds_lfht *ht; unsigned long order; @@ -1070,6 +1260,7 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct, if (init_size && (init_size & (init_size - 1))) return NULL; ht = calloc(1, sizeof(struct cds_lfht)); + assert(ht); ht->hash_fct = hash_fct; ht->compare_fct = compare_fct; ht->hash_seed = hash_seed; @@ -1079,6 +1270,9 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct, ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock; ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline; ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online; + ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread; + ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread; + ht->resize_attr = attr; ht->percpu_count = alloc_per_cpu_items_count(); /* this mutex should not nest in read-side C.S. 
*/ pthread_mutex_init(&ht->resize_mutex, NULL); @@ -1093,7 +1287,8 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct, return ht; } -struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len) +void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len, + struct cds_lfht_iter *iter) { struct cds_lfht_node *node, *next, *dummy_node; struct _cds_lfht_node *lookup; @@ -1130,21 +1325,22 @@ struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key node = clear_flag(next); } assert(!node || !is_dummy(rcu_dereference(node->p.next))); - return node; + iter->node = node; + iter->next = next; } -struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht, - struct cds_lfht_node *node) +void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter) { - struct cds_lfht_node *next; + struct cds_lfht_node *node, *next; unsigned long reverse_hash; void *key; size_t key_len; + node = iter->node; reverse_hash = node->p.reverse_hash; key = node->key; key_len = node->key_len; - next = rcu_dereference(node->p.next); + next = iter->next; node = clear_flag(next); for (;;) { @@ -1165,7 +1361,8 @@ struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht, node = clear_flag(next); } assert(!node || !is_dummy(rcu_dereference(node->p.next))); - return node; + iter->node = node; + iter->next = next; } void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node) @@ -1176,12 +1373,12 @@ void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node) node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); size = rcu_dereference(ht->t.size); - (void) _cds_lfht_add(ht, size, node, 0, 0); + (void) _cds_lfht_add(ht, size, node, ADD_DEFAULT, 0); ht_count_add(ht, size); } struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, - struct cds_lfht_node *node) + struct cds_lfht_node *node) { unsigned long hash, size; struct cds_lfht_node *ret; @@ -1190,21 +1387,37 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); size = rcu_dereference(ht->t.size); - ret = _cds_lfht_add(ht, size, node, 1, 0); + ret = _cds_lfht_add(ht, size, node, ADD_UNIQUE, 0); if (ret == node) ht_count_add(ht, size); return ret; } -int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node) +struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht, + struct cds_lfht_node *node) +{ + unsigned long hash, size; + struct cds_lfht_node *ret; + + hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); + node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); + + size = rcu_dereference(ht->t.size); + ret = _cds_lfht_add(ht, size, node, ADD_REPLACE, 0); + if (ret == NULL) + ht_count_add(ht, size); + return ret; +} + +int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node) { unsigned long size; int ret; size = rcu_dereference(ht->t.size); - ret = _cds_lfht_remove(ht, size, node, 0); + ret = _cds_lfht_del(ht, size, node, 0, 1); if (!ret) - ht_count_remove(ht, size); + ht_count_del(ht, size); return ret; } @@ -1223,6 +1436,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht) if (!is_dummy(node)) return -EPERM; assert(!is_removed(node)); + assert(!is_gc(node)); } while (!is_end(node)); /* * size accessed without rcu_dereference because hash table is @@ -1249,7 +1463,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht) * Should only be called when no more concurrent readers nor writers can * possibly access the table. 
*/ -int cds_lfht_destroy(struct cds_lfht *ht) +int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr) { int ret; @@ -1261,6 +1475,8 @@ int cds_lfht_destroy(struct cds_lfht *ht) if (ret) return ret; free_per_cpu_items_count(ht->percpu_count); + if (attr) + *attr = ht->resize_attr; poison_free(ht); return ret; } @@ -1281,7 +1497,7 @@ void cds_lfht_count_nodes(struct cds_lfht *ht, node = (struct cds_lfht_node *) lookup; do { next = rcu_dereference(node->p.next); - if (is_removed(next)) { + if (is_removed(next) || is_gc(next)) { assert(!is_dummy(next)); (*removed)++; } else if (!is_dummy(next))
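
The patch widens the pointer-tagging scheme from two flags to three: REMOVED, GC and DUMMY now occupy the three low bits of the next pointer, which are free because nodes are sufficiently aligned. A minimal, self-contained sketch of that technique follows, using a stand-in node type rather than the real struct cds_lfht_node:

#include <assert.h>
#include <stdio.h>

#define REMOVED_FLAG	(1UL << 0)
#define GC_FLAG		(1UL << 1)
#define DUMMY_FLAG	(1UL << 2)
#define FLAGS_MASK	((1UL << 3) - 1)

/* Alignment >= 8 guarantees the three low bits of a node pointer are zero. */
struct node {
	struct node *next;	/* low bits reused as flag storage */
} __attribute__((aligned(8)));

static struct node *flag_gc(struct node *p)
{
	return (struct node *) ((unsigned long) p | GC_FLAG);
}

static int is_gc(struct node *p)
{
	return (int) ((unsigned long) p & GC_FLAG);
}

static struct node *clear_flag(struct node *p)
{
	return (struct node *) ((unsigned long) p & ~FLAGS_MASK);
}

int main(void)
{
	static struct node a, b;

	a.next = flag_gc(&b);	/* flags on a's next mark a, the holder, for gc */
	assert(is_gc(a.next));
	assert(clear_flag(a.next) == &b);	/* masking recovers the pointer */
	printf("flag bits ok\n");
	return 0;
}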
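The counter rename (add/remove becomes add/del) sits on top of a split-counter scheme: each CPU counts locally, and only whole batches of 2^COUNT_COMMIT_ORDER operations are folded into the global approximate count. Below is a sketch of the add side under stated assumptions: GCC/Clang __atomic builtins stand in for uatomic_*, and a caller-chosen slot index stands in for the per-CPU lookup.

#include <stdio.h>

#define COUNT_COMMIT_ORDER	10
#define NR_SLOTS		4

struct items_count {
	unsigned long add, del;
} __attribute__((aligned(64)));	/* one counter pair per cache line */

static struct items_count percpu_count[NR_SLOTS];
static unsigned long global_count;

static void count_add(int slot)
{
	unsigned long c = __atomic_add_fetch(&percpu_count[slot].add, 1,
			__ATOMIC_RELAXED);

	/* Fold a full batch into the global count once per 2^10 local adds. */
	if (!(c & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		__atomic_add_fetch(&global_count,
				1UL << COUNT_COMMIT_ORDER, __ATOMIC_RELAXED);
}

int main(void)
{
	for (int i = 0; i < 10000; i++)
		count_add(i % NR_SLOTS);
	/* Approximate by design: partial batches stay CPU-local. */
	printf("approx %lu, exact 10000\n", global_count);
	return 0;
}

The del side is symmetric, and the resize triggers in ht_count_add()/ht_count_del() hang off the same batch-commit points, so resize decisions are only taken once per committed batch.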
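init_table_populate() and remove_table() now fan their work out through partition_resize_helper(): once a level holds at least 2 * MIN_PARTITION_PER_THREAD dummy nodes, the range is split over up to nr_cpus_mask + 1 threads, each running the same function on its own contiguous slice. A condensed sketch of that pattern (compile with -pthread); the RCU thread registration and the real per-bucket work are omitted, and do_slice is a hypothetical stand-in:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct partition_work {
	unsigned long start, len;
	void (*fct)(unsigned long start, unsigned long len);
};

static unsigned long touched;

static void do_slice(unsigned long start, unsigned long len)
{
	/* Placeholder for populating/removing dummies in [start, start+len). */
	__atomic_add_fetch(&touched, len, __ATOMIC_RELAXED);
}

static void *partition_thread(void *arg)
{
	struct partition_work *work = arg;

	work->fct(work->start, work->len);
	return NULL;
}

static void partition_helper(unsigned long len, unsigned long nr_threads,
		void (*fct)(unsigned long, unsigned long))
{
	unsigned long partition_len = len / nr_threads;
	struct partition_work *work = calloc(nr_threads, sizeof(*work));
	pthread_t *thread_id = calloc(nr_threads, sizeof(*thread_id));
	unsigned long t;
	int ret;

	assert(work && thread_id);	/* the patch itself only asserts work */
	for (t = 0; t < nr_threads; t++) {
		work[t].start = t * partition_len;
		work[t].len = partition_len;
		work[t].fct = fct;
		ret = pthread_create(&thread_id[t], NULL,
				partition_thread, &work[t]);
		assert(!ret);
	}
	for (t = 0; t < nr_threads; t++) {
		ret = pthread_join(thread_id[t], NULL);
		assert(!ret);
	}
	free(work);
	free(thread_id);
}

int main(void)
{
	partition_helper(1UL << 16, 4, do_slice);
	printf("touched %lu dummy nodes\n", touched);
	return 0;
}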
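The lock-free replace itself hinges on a single compare-and-swap: the replacement node is linked in after the victim, and the same CAS that publishes it sets REMOVED|GC on the victim's next pointer, so a traversal that skips any node whose next pointer carries flags sees either the old node or the new one, never both and never neither. Here is a single-threaded toy list demonstrating that visibility rule (flag layout borrowed from the patch; the list walk stands in for the real lookup):

#include <stdio.h>

#define REMOVED_FLAG	(1UL << 0)
#define GC_FLAG		(1UL << 1)
#define FLAGS_MASK	((1UL << 2) - 1)

struct node {
	int value;
	struct node *next;
} __attribute__((aligned(8)));

static struct node *clear_flag(struct node *p)
{
	return (struct node *) ((unsigned long) p & ~FLAGS_MASK);
}

/* Flags on a node's next pointer mean the node itself is logically gone. */
static int is_deleted(struct node *p)
{
	return (int) ((unsigned long) p & (REMOVED_FLAG | GC_FLAG));
}

static void print_live(struct node *head)
{
	for (struct node *n = head; n; n = clear_flag(n->next))
		if (!is_deleted(n->next))
			printf("%d ", n->value);
	printf("\n");
}

int main(void)
{
	struct node c = { 3, NULL };
	struct node b = { 2, &c };
	struct node a = { 1, &b };
	struct node b2 = { 20, &c };	/* replacement for b, same successor */
	struct node *expected = &c;
	struct node *flagged = (struct node *)
			((unsigned long) &b2 | REMOVED_FLAG | GC_FLAG);

	/* One CAS: publish b2 after b and logically delete b. */
	if (!__atomic_compare_exchange_n(&b.next, &expected, flagged, 0,
			__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		return 1;
	print_live(&a);		/* prints: 1 20 3 */
	return 0;
}

A reader that loaded b's next pointer just before the CAS still reaches c through the value it already holds: it uses the old node but will not see the new one, which is exactly the window the comment in _cds_lfht_add() describes.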
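On the API side, cds_lfht_lookup() no longer returns a node: it fills a caller-supplied struct cds_lfht_iter, and cds_lfht_next() advances that iterator over the remaining nodes holding the same key. A usage sketch against the patched API; the header name, the caller-built hash table, and the direct access to iter.node are assumptions about the surrounding program:

#include <stddef.h>
#include <urcu.h>
#include "rculfhash.h"	/* assumed in-tree header for this version */

/* Count how many nodes currently match @key, under the RCU read lock. */
static unsigned long count_key_matches(struct cds_lfht *ht,
		void *key, size_t key_len)
{
	struct cds_lfht_iter iter;
	unsigned long count = 0;

	rcu_read_lock();
	cds_lfht_lookup(ht, key, key_len, &iter);
	while (iter.node) {
		count++;
		/* Advance to the next duplicate of the same key. */
		cds_lfht_next(ht, &iter);
	}
	rcu_read_unlock();
	return count;
}

Carrying both node and next in the iterator is what lets cds_lfht_next() resume exactly where the lookup stopped, without re-reading a next pointer that a concurrent replace may already have flagged.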