X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=be995dfeaf5ef70795a6fd74a7bc34f4ced558c6;hp=93c2d7850fad9da9c333b47f738af08758ed62ed;hb=3fb86f26cfea354e44be1099642fc88436e3edf3;hpb=973e5e1b7837176071749d727e37bdfe9e3c4f0e

diff --git a/rculfhash.c b/rculfhash.c
index 93c2d78..be995df 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -144,7 +144,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -210,7 +209,7 @@ struct ht_items_count {
 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 
 struct rcu_level {
-	struct rcu_head head;
+	/* Note: manually update allocation length when adding a field */
 	struct _cds_lfht_node nodes[0];
 };
 
@@ -246,7 +245,7 @@ struct cds_lfht {
 	void (*cds_lfht_rcu_register_thread)(void);
 	void (*cds_lfht_rcu_unregister_thread)(void);
 	pthread_attr_t *resize_attr;	/* Resize threads attributes */
-	unsigned long count;		/* global approximate item count */
+	long count;			/* global approximate item count */
 	struct ht_items_count *percpu_count;	/* per-cpu item count */
 };
 
@@ -256,7 +255,7 @@ struct rcu_resize_work {
 };
 
 struct partition_resize_work {
-	struct rcu_head head;
+	pthread_t thread_id;
 	struct cds_lfht *ht;
 	unsigned long i, start, len;
 	void (*fct)(struct cds_lfht *ht, unsigned long i,
@@ -447,24 +446,28 @@ unsigned int fls_ulong(unsigned long x)
 #endif
 }
 
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
 int get_count_order_u32(uint32_t x)
 {
-	int order;
+	if (!x)
+		return -1;
 
-	order = fls_u32(x) - 1;
-	if (x & (x - 1))
-		order++;
-	return order;
+	return fls_u32(x - 1);
 }
 
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
 int get_count_order_ulong(unsigned long x)
 {
-	int order;
+	if (!x)
+		return -1;
 
-	order = fls_ulong(x) - 1;
-	if (x & (x - 1))
-		order++;
-	return order;
+	return fls_ulong(x - 1);
 }
 
 #ifdef POISON_FREE
@@ -556,7 +559,7 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size)
 		return;
 	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
 	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
-		unsigned long count;
+		long count;
 
 		dbg_printf("add percpu %lu\n", percpu_count);
 		count = uatomic_add_return(&ht->count,
@@ -565,7 +568,7 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size)
 		if (!(count & (count - 1))) {
 			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
 				return;
-			dbg_printf("add set global %lu\n", count);
+			dbg_printf("add set global %ld\n", count);
 			cds_lfht_resize_lazy_count(ht, size,
 				count >> (CHAIN_LEN_TARGET - 1));
 		}
@@ -585,7 +588,7 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
 		return;
 	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, 1);
 	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
-		unsigned long count;
+		long count;
 
 		dbg_printf("del percpu %lu\n", percpu_count);
 		count = uatomic_add_return(&ht->count,
@@ -594,7 +597,13 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
 		if (!(count & (count - 1))) {
 			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
 				return;
-			dbg_printf("del set global %lu\n", count);
+			dbg_printf("del set global %ld\n", count);
+			/*
+			 * Don't shrink table if the number of nodes is below a
+			 * certain threshold.
+			 */
+			if (count < (1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1))
+				return;
 			cds_lfht_resize_lazy_count(ht, size,
 				count >> (CHAIN_LEN_TARGET - 1));
 		}
@@ -603,7 +612,7 @@
 
 #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
 
-static const long nr_cpus_mask = -1;
+static const long nr_cpus_mask = -2;
 
 static
 struct ht_items_count *alloc_per_cpu_items_count(void)
@@ -708,11 +717,24 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
 }
 
 static
-void cds_lfht_free_level(struct rcu_head *head)
+struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
+		unsigned long hash)
 {
-	struct rcu_level *l =
-		caa_container_of(head, struct rcu_level, head);
-	poison_free(l);
+	unsigned long index, order;
+
+	assert(size > 0);
+	index = hash & (size - 1);
+	/*
+	 * equivalent to get_count_order_ulong(index + 1), but optimizes
+	 * away the non-existing 0 special-case for
+	 * get_count_order_ulong.
+	 */
+	order = fls_ulong(index);
+
+	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
+		   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
+
+	return &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
 }
 
 /*
@@ -762,6 +784,77 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 	return;
 }
 
+static
+int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
+		struct cds_lfht_node *old_node,
+		struct cds_lfht_node *old_next,
+		struct cds_lfht_node *new_node)
+{
+	struct cds_lfht_node *dummy, *ret_next;
+	struct _cds_lfht_node *lookup;
+	int flagged = 0;
+
+	if (!old_node)	/* Return -ENOENT if asked to replace NULL node */
+		goto end;
+
+	assert(!is_removed(old_node));
+	assert(!is_dummy(old_node));
+	assert(!is_removed(new_node));
+	assert(!is_dummy(new_node));
+	assert(new_node != old_node);
+	for (;;) {
+		/* Insert after node to be replaced */
+		if (is_removed(old_next)) {
+			/*
+			 * Too late, the old node has been removed under us
+			 * between lookup and replace. Fail.
+			 */
+			goto end;
+		}
+		assert(!is_dummy(old_next));
+		assert(new_node != clear_flag(old_next));
+		new_node->p.next = clear_flag(old_next);
+		/*
+		 * Here is the whole trick for lock-free replace: we add
+		 * the replacement node _after_ the node we want to
+		 * replace by atomically setting its next pointer at the
+		 * same time we set its removal flag. Given that
+		 * the lookups/get next use an iterator aware of the
+		 * next pointer, they will either skip the old node due
+		 * to the removal flag and see the new node, or use
+		 * the old node, but will not see the new one.
+		 */
+		ret_next = uatomic_cmpxchg(&old_node->p.next,
+			      old_next, flag_removed(new_node));
+		if (ret_next == old_next)
+			break;
+		old_next = ret_next;
+	}
+
+	/* We performed the replacement. */
+	flagged = 1;
+
+	/*
+	 * Ensure that the old node is not visible to readers anymore:
+	 * lookup for the node, and remove it (along with any other
+	 * logically removed node) if found.
+	 */
+	lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
+	dummy = (struct cds_lfht_node *) lookup;
+	_cds_lfht_gc_bucket(dummy, new_node);
+end:
+	/*
+	 * Only the flagging action indicated that we (and no other)
+	 * replaced the node from the hash table.
+	 */
+	if (flagged) {
+		assert(is_removed(rcu_dereference(old_node->p.next)));
+		return 0;
+	} else {
+		return -ENOENT;
+	}
+}
+
 static
 struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 				unsigned long size,
@@ -769,9 +862,8 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 				enum add_mode mode, int dummy)
 {
 	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
-			*dummy_node, *return_node;
+			*return_node;
 	struct _cds_lfht_node *lookup;
-	unsigned long hash, index, order;
 
 	assert(!is_dummy(node));
 	assert(!is_removed(node));
@@ -780,7 +872,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 		node->p.next = flag_dummy(get_end());
 		return node;	/* Initial first add (head) */
 	}
-	hash = bit_reverse_ulong(node->p.reverse_hash);
+	lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
 	for (;;) {
 		uint32_t chain_len = 0;
 
@@ -788,9 +880,6 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 		 * iter_prev points to the non-removed node prior to the
 		 * insert location.
 		 */
-		index = hash & (size - 1);
-		order = get_count_order_ulong(index + 1);
-		lookup = &ht->t.tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
 		iter_prev = (struct cds_lfht_node *) lookup;
 		/* We can always skip the dummy node initially */
 		iter = rcu_dereference(iter_prev->p.next);
@@ -800,11 +889,15 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 				goto insert;
 			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
 				goto insert;
+			/* dummy node is the first node of the identical-hash-value chain */
+			if (dummy && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash)
+				goto insert;
 			next = rcu_dereference(clear_flag(iter)->p.next);
 			if (unlikely(is_removed(next)))
 				goto gc_node;
 			if ((mode == ADD_UNIQUE || mode == ADD_REPLACE)
 			    && !is_dummy(next)
+			    && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash
 			    && !ht->compare_fct(node->key, node->key_len,
 						clear_flag(iter)->key,
 						clear_flag(iter)->key_len)) {
@@ -842,40 +935,17 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 				return_node = NULL;
 			else /* ADD_DEFAULT and ADD_UNIQUE */
 				return_node = node;
-			goto gc_end;
+			goto end;
 		}
 
 	replace:
-		/* Insert after node to be replaced */
-		iter_prev = clear_flag(iter);
-		iter = next;
-		assert(node != clear_flag(iter));
-		assert(!is_removed(iter_prev));
-		assert(!is_removed(iter));
-		assert(iter_prev != node);
-		assert(!dummy);
-		node->p.next = clear_flag(iter);
-		if (is_dummy(iter))
-			new_node = flag_dummy(node);
-		else
-			new_node = node;
-		/*
-		 * Here is the whole trick for lock-free replace: we add
-		 * the replacement node _after_ the node we want to
-		 * replace by atomically setting its next pointer at the
-		 * same time we set its removal flag. Given that
-		 * the lookups/get next use an iterator aware of the
-		 * next pointer, they will either skip the old node due
-		 * to the removal flag and see the new node, or use
-		 * the old node, but will not see the new one.
-		 */
-		new_node = flag_removed(new_node);
-		if (uatomic_cmpxchg(&iter_prev->p.next,
-				iter, new_node) != iter) {
-			continue;	/* retry */
+
+		if (!_cds_lfht_replace(ht, size, clear_flag(iter), next,
+				node)) {
+			return_node = clear_flag(iter);
+			goto end;	/* gc already done */
 		} else {
-			return_node = iter_prev;
-			goto gc_end;
+			continue;	/* retry */
 		}
 
 	gc_node:
@@ -887,13 +957,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
 		/* retry */
 	}
-gc_end:
-	/* Garbage collect logically removed nodes in the bucket */
-	index = hash & (size - 1);
-	order = get_count_order_ulong(index + 1);
-	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
-	dummy_node = (struct cds_lfht_node *) lookup;
-	_cds_lfht_gc_bucket(dummy_node, node);
+end:
 	return return_node;
 }
 
@@ -905,7 +969,9 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 	struct cds_lfht_node *dummy, *next, *old;
 	struct _cds_lfht_node *lookup;
 	int flagged = 0;
-	unsigned long hash, index, order;
+
+	if (!node)	/* Return -ENOENT if asked to delete NULL node */
+		goto end;
 
 	/* logically delete the node */
 	assert(!is_dummy(node));
@@ -933,11 +999,7 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 	 * the node, and remove it (along with any other logically removed node)
 	 * if found.
 	 */
-	hash = bit_reverse_ulong(node->p.reverse_hash);
-	assert(size > 0);
-	index = hash & (size - 1);
-	order = get_count_order_ulong(index + 1);
-	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+	lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
 	dummy = (struct cds_lfht_node *) lookup;
 	_cds_lfht_gc_bucket(dummy, node);
 end:
@@ -948,8 +1010,9 @@ end:
 	if (flagged) {
 		assert(is_removed(rcu_dereference(node->p.next)));
 		return 0;
-	} else
+	} else {
 		return -ENOENT;
+	}
 }
 
 static
@@ -973,18 +1036,20 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 	struct partition_resize_work *work;
 	int thread, ret;
 	unsigned long nr_threads;
-	pthread_t *thread_id;
 
 	/*
 	 * Note: nr_cpus_mask + 1 is always power of 2.
 	 * We spawn just the number of threads we need to satisfy the minimum
 	 * partition size, up to the number of CPUs in the system.
 	 */
-	nr_threads = min(nr_cpus_mask + 1,
-			 len >> MIN_PARTITION_PER_THREAD_ORDER);
+	if (nr_cpus_mask > 0) {
+		nr_threads = min(nr_cpus_mask + 1,
+				 len >> MIN_PARTITION_PER_THREAD_ORDER);
+	} else {
+		nr_threads = 1;
+	}
 	partition_len = len >> get_count_order_ulong(nr_threads);
 	work = calloc(nr_threads, sizeof(*work));
-	thread_id = calloc(nr_threads, sizeof(*thread_id));
 	assert(work);
 	for (thread = 0; thread < nr_threads; thread++) {
 		work[thread].ht = ht;
@@ -992,16 +1057,15 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 		work[thread].len = partition_len;
 		work[thread].start = thread * partition_len;
 		work[thread].fct = fct;
-		ret = pthread_create(&thread_id[thread], ht->resize_attr,
+		ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
 			partition_resize_thread, &work[thread]);
 		assert(!ret);
 	}
 	for (thread = 0; thread < nr_threads; thread++) {
-		ret = pthread_join(thread_id[thread], NULL);
+		ret = pthread_join(work[thread].thread_id, NULL);
 		assert(!ret);
 	}
 	free(work);
-	free(thread_id);
 }
 
 /*
@@ -1032,8 +1096,6 @@ void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
 			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
 		(void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
 				new_node, ADD_DEFAULT, 1);
-		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
-			break;
 	}
 	ht->cds_lfht_rcu_read_unlock();
 }
@@ -1071,8 +1133,7 @@ void init_table(struct cds_lfht *ht,
 		if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
 			break;
 
-		ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
-				+ (len * sizeof(struct _cds_lfht_node)));
+		ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
 		assert(ht->t.tbl[i]);
 
 		/*
@@ -1135,8 +1196,6 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
 			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
 		(void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
 				fini_node, 1);
-		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
-			break;
 	}
 	ht->cds_lfht_rcu_read_unlock();
 }
@@ -1160,6 +1219,7 @@ void fini_table(struct cds_lfht *ht,
 		unsigned long first_order, unsigned long len_order)
 {
 	long i, end_order;
+	void *free_by_rcu = NULL;
 
 	dbg_printf("fini table: first_order %lu end_order %lu\n",
 		   first_order, first_order + len_order);
@@ -1185,6 +1245,8 @@ void fini_table(struct cds_lfht *ht,
 		 * return a logically removed node as insert position.
 		 */
 		ht->cds_lfht_synchronize_rcu();
+		if (free_by_rcu)
+			free(free_by_rcu);
 
 		/*
 		 * Set "removed" flag in dummy nodes about to be removed.
@@ -1194,12 +1256,17 @@ void fini_table(struct cds_lfht *ht,
 		 */
 		remove_table(ht, i, len);
 
-		ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level);
+		free_by_rcu = ht->t.tbl[i];
 
 		dbg_printf("fini new size: %lu\n", 1UL << i);
 		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
 			break;
 	}
+
+	if (free_by_rcu) {
+		ht->cds_lfht_synchronize_rcu();
+		free(free_by_rcu);
+	}
 }
 
 struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
@@ -1257,33 +1324,30 @@ void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
 {
 	struct cds_lfht_node *node, *next, *dummy_node;
 	struct _cds_lfht_node *lookup;
-	unsigned long hash, reverse_hash, index, order, size;
+	unsigned long hash, reverse_hash, size;
 
 	hash = ht->hash_fct(key, key_len, ht->hash_seed);
 	reverse_hash = bit_reverse_ulong(hash);
 
 	size = rcu_dereference(ht->t.size);
-	index = hash & (size - 1);
-	order = get_count_order_ulong(index + 1);
-	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
-	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
-		   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
+	lookup = lookup_bucket(ht, size, hash);
 	dummy_node = (struct cds_lfht_node *) lookup;
 	/* We can always skip the dummy node initially */
 	node = rcu_dereference(dummy_node->p.next);
 	node = clear_flag(node);
 	for (;;) {
 		if (unlikely(is_end(node))) {
-			node = NULL;
+			node = next = NULL;
 			break;
 		}
 		if (unlikely(node->p.reverse_hash > reverse_hash)) {
-			node = NULL;
+			node = next = NULL;
 			break;
 		}
 		next = rcu_dereference(node->p.next);
 		if (likely(!is_removed(next))
 		    && !is_dummy(next)
+		    && clear_flag(node)->p.reverse_hash == reverse_hash
 		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
 				break;
 		}
@@ -1294,7 +1358,7 @@ void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
 	iter->next = next;
 }
 
-void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
 	struct cds_lfht_node *node, *next;
 	unsigned long reverse_hash;
@@ -1310,11 +1374,11 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 
 	for (;;) {
 		if (unlikely(is_end(node))) {
-			node = NULL;
+			node = next = NULL;
 			break;
 		}
 		if (unlikely(node->p.reverse_hash > reverse_hash)) {
-			node = NULL;
+			node = next = NULL;
 			break;
 		}
 		next = rcu_dereference(node->p.next);
@@ -1330,6 +1394,41 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 	iter->next = next;
 }
 
+void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+	struct cds_lfht_node *node, *next;
+
+	node = clear_flag(iter->next);
+	for (;;) {
+		if (unlikely(is_end(node))) {
+			node = next = NULL;
+			break;
+		}
+		next = rcu_dereference(node->p.next);
+		if (likely(!is_removed(next))
+		    && !is_dummy(next)) {
+			break;
+		}
+		node = clear_flag(next);
+	}
+	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+	iter->node = node;
+	iter->next = next;
+}
+
+void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+	struct _cds_lfht_node *lookup;
+
+	/*
+	 * Get next after first dummy node. The first dummy node is the
+	 * first node of the linked list.
+	 */
+	lookup = &ht->t.tbl[0]->nodes[0];
+	iter->next = lookup->next;
+	cds_lfht_next(ht, iter);
+}
+
 void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
 {
 	unsigned long hash, size;
@@ -1358,7 +1457,7 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
 	return ret;
 }
 
-struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht,
+struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
 				struct cds_lfht_node *node)
 {
 	unsigned long hash, size;
@@ -1374,13 +1473,23 @@ struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht,
 	return ret;
 }
 
-int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
+int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
+		struct cds_lfht_node *new_node)
+{
+	unsigned long size;
+
+	size = rcu_dereference(ht->t.size);
+	return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
+			new_node);
+}
+
+int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
 	unsigned long size;
 	int ret;
 
 	size = rcu_dereference(ht->t.size);
-	ret = _cds_lfht_del(ht, size, node, 0);
+	ret = _cds_lfht_del(ht, size, iter->node, 0);
 	if (!ret)
 		ht_count_del(ht, size);
 	return ret;
@@ -1432,7 +1541,8 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 	int ret;
 
 	/* Wait for in-flight resize operations to complete */
-	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+	_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+	cmm_smp_mb();	/* Store destroy before load resize */
 	while (uatomic_read(&ht->in_progress_resize))
 		poll(NULL, 0, 100);	/* wait for 100ms */
 	ret = cds_lfht_delete_dummy(ht);
@@ -1446,16 +1556,16 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 }
 
 void cds_lfht_count_nodes(struct cds_lfht *ht,
-		unsigned long *approx_before,
+		long *approx_before,
 		unsigned long *count,
 		unsigned long *removed,
-		unsigned long *approx_after)
+		long *approx_after)
 {
 	struct cds_lfht_node *node, *next;
 	struct _cds_lfht_node *lookup;
 	unsigned long nr_dummy = 0;
 
-	*approx_before = uatomic_read(&ht->count);
+	*approx_before = 0;
 	if (nr_cpus_mask >= 0) {
 		int i;
 
@@ -1485,7 +1595,7 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
 		node = clear_flag(next);
 	} while (!is_end(node));
 	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
-	*approx_after = uatomic_read(&ht->count);
+	*approx_after = 0;
 	if (nr_cpus_mask >= 0) {
 		int i;
 
@@ -1505,8 +1615,8 @@ void _do_cds_lfht_grow(struct cds_lfht *ht,
 
 	old_order = get_count_order_ulong(old_size) + 1;
 	new_order = get_count_order_ulong(new_size) + 1;
-	printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
-	       old_size, old_order, new_size, new_order);
+	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+		   old_size, old_order, new_size, new_order);
 	assert(new_size > old_size);
 	init_table(ht, old_order, new_order - old_order);
 }
@@ -1521,8 +1631,8 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht,
 	new_size = max(new_size, MIN_TABLE_SIZE);
 	old_order = get_count_order_ulong(old_size) + 1;
 	new_order = get_count_order_ulong(new_size) + 1;
-	printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
-	       old_size, old_order, new_size, new_order);
+	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+		   old_size, old_order, new_size, new_order);
 	assert(new_size < old_size);
 
 	/* Remove and unlink all dummy nodes to remove. */
@@ -1540,6 +1650,9 @@ void _do_cds_lfht_resize(struct cds_lfht *ht)
 	 * Resize table, re-do if the target size has changed under us.
 	 */
 	do {
+		assert(uatomic_read(&ht->in_progress_resize));
+		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+			break;
 		ht->t.resize_initiated = 1;
 		old_size = ht->t.size;
 		new_size = CMM_LOAD_SHARED(ht->t.resize_target);
@@ -1608,7 +1721,11 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
 	cmm_smp_mb();
 	if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
 		uatomic_inc(&ht->in_progress_resize);
-		cmm_smp_mb();	/* increment resize count before calling it */
+		cmm_smp_mb();	/* increment resize count before load destroy */
+		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+			uatomic_dec(&ht->in_progress_resize);
+			return;
+		}
 		work = malloc(sizeof(*work));
 		work->ht = ht;
 		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
@@ -1631,7 +1748,11 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 	cmm_smp_mb();
 	if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
 		uatomic_inc(&ht->in_progress_resize);
-		cmm_smp_mb();	/* increment resize count before calling it */
+		cmm_smp_mb();	/* increment resize count before load destroy */
+		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+			uatomic_dec(&ht->in_progress_resize);
+			return;
+		}
		work = malloc(sizeof(*work));
 		work->ht = ht;
 		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
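
The new get_count_order_u32()/get_count_order_ulong() return the smallest order such that x <= (1UL << order), computed as fls(x - 1) instead of the old fls/round-up sequence. A minimal standalone sketch of the same computation, assuming a GCC-style __builtin_clz for the fls fallback (the helpers here are illustrative stand-ins, not the ones in rculfhash.c):

#include <assert.h>
#include <stdint.h>

/* Portable fls (find last set bit): 1-based MSB position, 0 for x == 0. */
static int my_fls_u32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* New semantics: smallest order with x <= (1UL << order); -1 for x == 0. */
static int my_count_order_u32(uint32_t x)
{
	if (!x)
		return -1;
	return my_fls_u32(x - 1);
}

int main(void)
{
	assert(my_count_order_u32(1) == 0);	/* 1 <= 1 << 0 */
	assert(my_count_order_u32(4) == 2);	/* exact power of two */
	assert(my_count_order_u32(5) == 3);	/* rounded up to 8 */
	return 0;
}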
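ht_count_add()/ht_count_del() only touch the shared ht->count once every 2^COUNT_COMMIT_ORDER local operations, and the switch from unsigned long to long matters because committed deletes can transiently drive the approximation negative. A simplified sketch of the split-counter scheme using C11 atomics and per-thread (rather than the patch's per-CPU) counters; all names here are hypothetical:

#include <stdatomic.h>

#define COUNT_COMMIT_ORDER	10	/* commit every 1024 local ops */

static _Atomic long global_count;	/* signed: may dip below zero */
static _Thread_local unsigned long local_add, local_del;

/* Approximate counting: touch the shared counter once per batch. */
static void count_add(void)
{
	if (!(++local_add & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		atomic_fetch_add(&global_count, 1L << COUNT_COMMIT_ORDER);
}

static void count_del(void)
{
	if (!(++local_del & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		atomic_fetch_sub(&global_count, 1L << COUNT_COMMIT_ORDER);
}

/* Readers get an approximation, accurate to within the uncommitted batches. */
static long count_approx(void)
{
	return atomic_load(&global_count);
}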
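The new lookup_bucket() relies on the split-ordered layout: bucket index 0 lives in the order-0 level, and any other index i lives in level fls(i), at the offset produced by masking off the level's base with index & ((1UL << (order - 1)) - 1). A small self-contained demonstration of that mapping (bucket_position() is a hypothetical helper, not part of the patch):

#include <assert.h>

/* 1-based MSB position for unsigned long, 0 for 0 (GCC builtin assumed). */
static unsigned long my_fls_ulong(unsigned long x)
{
	return x ? (sizeof(unsigned long) * 8) - __builtin_clzl(x) : 0;
}

/* Map a bucket index to its (level, offset) pair in the multi-level table. */
static void bucket_position(unsigned long index,
		unsigned long *order, unsigned long *aridx)
{
	*order = my_fls_ulong(index);	/* == get_count_order_ulong(index + 1) */
	*aridx = index & (!*order ? 0 : ((1UL << (*order - 1)) - 1));
}

int main(void)
{
	unsigned long order, aridx;

	bucket_position(0, &order, &aridx);	/* level 0 holds index 0 */
	assert(order == 0 && aridx == 0);
	bucket_position(1, &order, &aridx);	/* level 1 holds index 1 */
	assert(order == 1 && aridx == 0);
	bucket_position(5, &order, &aridx);	/* level 3 holds indexes 4..7 */
	assert(order == 3 && aridx == 1);
	return 0;
}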
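The comment in _cds_lfht_replace() describes the core trick: a single cmpxchg on the old node's next pointer simultaneously publishes the replacement as successor and marks the old node removed, so traversals see exactly one of the two as live. A stripped-down sketch of that single-CAS replace using C11 atomics and a pointer tag bit; the node layout and names are illustrative, not the rculfhash ones:

#include <stdatomic.h>
#include <stdint.h>

struct node {
	_Atomic uintptr_t next;	/* low bit: REMOVED flag, as in rculfhash */
};

#define REMOVED_FLAG	((uintptr_t)1)

/*
 * One CAS on old_node->next both publishes new_node as the successor and
 * sets the removal flag. Concurrent readers either see the flag and skip
 * to new_node, or still traverse old_node -- never both as live.
 */
static int replace_node(struct node *old_node, struct node *new_node)
{
	uintptr_t old_next = atomic_load(&old_node->next);

	for (;;) {
		if (old_next & REMOVED_FLAG)
			return -1;	/* lost the race with a remover */
		atomic_store(&new_node->next, old_next);
		if (atomic_compare_exchange_weak(&old_node->next, &old_next,
				(uintptr_t)new_node | REMOVED_FLAG))
			return 0;
		/* CAS failure reloaded old_next; retry with the new value. */
	}
}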
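Moving the pthread_t into struct partition_resize_work removes the separate thread_id array and its extra calloc()/free() pair. The same pattern in isolation, with hypothetical names:

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

/* Keep the thread id inside the per-thread work item, so one allocation */
/* (and one free) covers both the work description and the join handle. */
struct work_item {
	pthread_t thread_id;
	unsigned long start, len;
};

static void *worker(void *arg)
{
	struct work_item *w = arg;
	/* ... process the partition [w->start, w->start + w->len) ... */
	(void) w;
	return NULL;
}

static void run_partitioned(unsigned long len, unsigned long nr_threads)
{
	struct work_item *work = calloc(nr_threads, sizeof(*work));
	unsigned long t;
	int ret;

	assert(work);
	for (t = 0; t < nr_threads; t++) {
		work[t].len = len / nr_threads;
		work[t].start = t * work[t].len;
		ret = pthread_create(&work[t].thread_id, NULL, worker, &work[t]);
		assert(!ret);
	}
	for (t = 0; t < nr_threads; t++) {
		ret = pthread_join(work[t].thread_id, NULL);
		assert(!ret);
	}
	free(work);
}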
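fini_table() now reclaims each level with plain free() instead of call_rcu(): the synchronize_rcu() that the next loop iteration performs anyway doubles as the grace period for the previously unlinked level, and one final synchronize_rcu() covers the last level. A sketch of that deferral pattern against the liburcu API (shrink_levels() is a hypothetical helper, simplified from what fini_table() does):

#include <stdlib.h>
#include <urcu.h>	/* assumption: an urcu flavor providing synchronize_rcu() */

/* Shrink a hypothetical multi-level table from level 'high' down to 'low'. */
static void shrink_levels(void **tbl, int high, int low)
{
	void *free_by_rcu = NULL;
	int i;

	for (i = high; i >= low; i--) {
		/* Grace period for readers of the previously unlinked level... */
		synchronize_rcu();
		/* ...after which its memory can safely be reclaimed. */
		free(free_by_rcu);
		/* Unlink tbl[i] here, then defer its free to the next pass. */
		free_by_rcu = tbl[i];
		tbl[i] = NULL;
	}
	if (free_by_rcu) {
		synchronize_rcu();	/* the last level needs its own grace period */
		free(free_by_rcu);
	}
}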
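The destroy and lazy-resize paths now form a store/load handshake: cds_lfht_destroy() stores in_progress_destroy and then polls in_progress_resize, while each resize launcher increments in_progress_resize and then re-checks in_progress_destroy, with full barriers (cmm_smp_mb()) between store and load on both sides, so at least one side is guaranteed to observe the other. A minimal model of the handshake, using C11 seq_cst atomics as a stand-in for uatomic_* and cmm_smp_mb():

#include <poll.h>
#include <stdatomic.h>

static _Atomic int in_progress_destroy;
static _Atomic int in_progress_resize;

/* Resize launcher: announce first, then re-check destroy before queueing. */
static int try_start_resize(void)
{
	atomic_fetch_add(&in_progress_resize, 1);
	/* seq_cst ordering here plays the role of cmm_smp_mb() in the patch */
	if (atomic_load(&in_progress_destroy)) {
		atomic_fetch_sub(&in_progress_resize, 1);
		return 0;	/* table is being destroyed, don't resize */
	}
	return 1;		/* safe to queue the resize work */
}

/* Destroy side: announce destroy, then wait for in-flight resizes to drain. */
static void wait_resize_drain(void)
{
	atomic_store(&in_progress_destroy, 1);
	while (atomic_load(&in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms, as in the patch */
}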