X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=rculfhash.c;h=070bcaf7c52acf5687ef3f729a62f5e41572d4e1;hb=24953e081b6cb2e0f4ce1de1c42664ece3c3ce1b;hp=c7a429e6902fa521ff0d72d2d62e6f194878ad66;hpb=4e9b9fbf56ad649395778b7385baaa44d8db032c;p=urcu.git

diff --git a/rculfhash.c b/rculfhash.c
index c7a429e..070bcaf 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -139,7 +139,7 @@
 #include
 #include
 
-#include <config.h>
+#include "config.h"
 #include
 #include
 #include
@@ -608,7 +608,7 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
 
 #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
 
-static const long nr_cpus_mask = -1;
+static const long nr_cpus_mask = -2;
 
 static
 struct ht_items_count *alloc_per_cpu_items_count(void)
@@ -1041,8 +1041,12 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 	 * We spawn just the number of threads we need to satisfy the minimum
 	 * partition size, up to the number of CPUs in the system.
 	 */
-	nr_threads = min(nr_cpus_mask + 1,
-			len >> MIN_PARTITION_PER_THREAD_ORDER);
+	if (nr_cpus_mask > 0) {
+		nr_threads = min(nr_cpus_mask + 1,
+				len >> MIN_PARTITION_PER_THREAD_ORDER);
+	} else {
+		nr_threads = 1;
+	}
 	partition_len = len >> get_count_order_ulong(nr_threads);
 	work = calloc(nr_threads, sizeof(*work));
 	thread_id = calloc(nr_threads, sizeof(*thread_id));
@@ -1395,10 +1399,7 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
 	struct cds_lfht_node *node, *next;
 
-	node = iter->node;
-	next = iter->next;
-	node = clear_flag(next);
-
+	node = clear_flag(iter->next);
 	for (;;) {
 		if (unlikely(is_end(node))) {
 			node = next = NULL;
@@ -1425,7 +1426,7 @@ void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 	 * first node of the linked list.
 	 */
 	lookup = &ht->t.tbl[0]->nodes[0];
-	iter->node = (struct cds_lfht_node *) lookup;
+	iter->next = lookup->next;
 	cds_lfht_next(ht, iter);
 }
 
@@ -1541,7 +1542,8 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 	int ret;
 
 	/* Wait for in-flight resize operations to complete */
-	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+	_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+	cmm_smp_mb();	/* Store destroy before load resize */
 	while (uatomic_read(&ht->in_progress_resize))
 		poll(NULL, 0, 100);	/* wait for 100ms */
 	ret = cds_lfht_delete_dummy(ht);
@@ -1649,6 +1651,8 @@ void _do_cds_lfht_resize(struct cds_lfht *ht)
 	 * Resize table, re-do if the target size has changed under us.
 	 */
 	do {
+		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+			break;
 		ht->t.resize_initiated = 1;
 		old_size = ht->t.size;
 		new_size = CMM_LOAD_SHARED(ht->t.resize_target);