X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=rculfhash.c;h=fa2121c9820edaa010856bf96a675a9cd6eace2e;hb=d2be3620f8040e68c0abdfb6ee9f11cc8b0961bc;hp=bff26a8d4fe668e2d4330927dcc812384aea039e;hpb=15cfbec77d2c573110cc936d5b33745d44207b50;p=urcu.git

diff --git a/rculfhash.c b/rculfhash.c
index bff26a8..fa2121c 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -608,7 +608,7 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
 
 #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
 
-static const long nr_cpus_mask = -1;
+static const long nr_cpus_mask = -2;
 
 static
 struct ht_items_count *alloc_per_cpu_items_count(void)
@@ -1041,8 +1041,12 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 	 * We spawn just the number of threads we need to satisfy the minimum
 	 * partition size, up to the number of CPUs in the system.
 	 */
-	nr_threads = min(nr_cpus_mask + 1,
-		     len >> MIN_PARTITION_PER_THREAD_ORDER);
+	if (nr_cpus_mask > 0) {
+		nr_threads = min(nr_cpus_mask + 1,
+			     len >> MIN_PARTITION_PER_THREAD_ORDER);
+	} else {
+		nr_threads = 1;
+	}
 	partition_len = len >> get_count_order_ulong(nr_threads);
 	work = calloc(nr_threads, sizeof(*work));
 	thread_id = calloc(nr_threads, sizeof(*thread_id));
@@ -1395,10 +1399,7 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
 	struct cds_lfht_node *node, *next;
 
-	node = iter->node;
-	next = iter->next;
-	node = clear_flag(next);
-
+	node = clear_flag(iter->next);
 	for (;;) {
 		if (unlikely(is_end(node))) {
 			node = next = NULL;
@@ -1425,7 +1426,7 @@ void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 	 * first node of the linked list.
 	 */
 	lookup = &ht->t.tbl[0]->nodes[0];
-	iter->node = (struct cds_lfht_node *) lookup;
+	iter->next = lookup->next;
 	cds_lfht_next(ht, iter);
 }
 
@@ -1541,7 +1542,8 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 	int ret;
 
 	/* Wait for in-flight resize operations to complete */
-	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+	_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+	cmm_smp_mb();	/* Store destroy before load resize */
 	while (uatomic_read(&ht->in_progress_resize))
 		poll(NULL, 0, 100);	/* wait for 100ms */
 	ret = cds_lfht_delete_dummy(ht);
@@ -1649,6 +1651,9 @@ void _do_cds_lfht_resize(struct cds_lfht *ht)
 	 * Resize table, re-do if the target size has changed under us.
 	 */
 	do {
+		assert(uatomic_read(&ht->in_progress_resize));
+		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+			break;
 		ht->t.resize_initiated = 1;
 		old_size = ht->t.size;
 		new_size = CMM_LOAD_SHARED(ht->t.resize_target);
@@ -1717,7 +1722,11 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
 	cmm_smp_mb();
 	if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
 		uatomic_inc(&ht->in_progress_resize);
-		cmm_smp_mb();	/* increment resize count before calling it */
+		cmm_smp_mb();	/* increment resize count before load destroy */
+		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+			uatomic_dec(&ht->in_progress_resize);
+			return;
+		}
 		work = malloc(sizeof(*work));
 		work->ht = ht;
 		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
@@ -1740,7 +1749,11 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 	cmm_smp_mb();
 	if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
 		uatomic_inc(&ht->in_progress_resize);
-		cmm_smp_mb();	/* increment resize count before calling it */
+		cmm_smp_mb();	/* increment resize count before load destroy */
+		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+			uatomic_dec(&ht->in_progress_resize);
+			return;
+		}
 		work = malloc(sizeof(*work));
 		work->ht = ht;
 		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
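
Note (illustrative commentary, not part of the patch): the destroy/resize hunks above form a Dekker-style store/load handshake. cds_lfht_destroy() publishes ht->in_progress_destroy and then waits for ht->in_progress_resize to drain, while each lazy-resize initiator increments ht->in_progress_resize and only afterwards checks the destroy flag, with a full barrier (cmm_smp_mb()) on both sides so that at least one side is guaranteed to observe the other's store. A minimal standalone sketch of the same pattern, assuming C11 atomics in place of the urcu uatomic_*/cmm_* primitives (all names below are hypothetical, not urcu API):

	/* Sketch only: C11-atomics rendition of the handshake the patch sets up. */
	#include <poll.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int  in_progress_resize;
	static atomic_bool in_progress_destroy;

	/* Resize initiator: publish intent first, then check for a racing destroy. */
	static bool try_start_resize(void)
	{
		atomic_fetch_add(&in_progress_resize, 1);
		/* Pairs with the fence in begin_destroy(); mirrors cmm_smp_mb(). */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&in_progress_destroy)) {
			/* Back out: the table is being torn down. */
			atomic_fetch_sub(&in_progress_resize, 1);
			return false;
		}
		return true;	/* safe to queue the resize work */
	}

	static void end_resize(void)
	{
		atomic_fetch_sub(&in_progress_resize, 1);
	}

	/* Destroy path: publish the flag, then wait for in-flight resizes to drain. */
	static void begin_destroy(void)
	{
		atomic_store(&in_progress_destroy, true);
		atomic_thread_fence(memory_order_seq_cst);
		while (atomic_load(&in_progress_resize))
			poll(NULL, 0, 100);	/* wait 100ms, as the patch does */
	}

Either the resize initiator sees in_progress_destroy set and backs out, or destroy sees a non-zero in_progress_resize and keeps waiting; the paired barriers rule out the store-buffering outcome in which both sides read the stale zero and destroy proceeds while a resize callback is still queued.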