rculfhash: remove unused rcu_head in partition_resize_work
[urcu.git] / rculfhash.c
index bff26a8d4fe668e2d4330927dcc812384aea039e..13274189887182f81a2a8ed5336db50a36e13c90 100644
@@ -255,7 +255,6 @@ struct rcu_resize_work {
 };
 
 struct partition_resize_work {
-       struct rcu_head head;
        struct cds_lfht *ht;
        unsigned long i, start, len;
        void (*fct)(struct cds_lfht *ht, unsigned long i,
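
The removed member was a leftover: the partition_resize_helper() hunk below allocates a thread_id array and the work items are reclaimed by the spawning thread after the join, so they never travel through call_rcu() and need no struct rcu_head. A minimal sketch of that ownership pattern, with invented names (not urcu's API):

    #include <pthread.h>

    /* Stand-in for struct partition_resize_work: no struct rcu_head,
     * because the item is freed synchronously after the join below. */
    struct partition_work {
            unsigned long i, start, len;
            void (*fct)(unsigned long i, unsigned long start,
                        unsigned long len);
    };

    static void *work_thread(void *arg)
    {
            struct partition_work *w = arg;

            w->fct(w->i, w->start, w->len);
            return NULL;
    }

    static void run_partitioned(struct partition_work *work, pthread_t *tid,
                                unsigned long nr_threads)
    {
            unsigned long t;

            for (t = 0; t < nr_threads; t++)
                    pthread_create(&tid[t], NULL, work_thread, &work[t]);
            for (t = 0; t < nr_threads; t++)
                    pthread_join(tid[t], NULL);
            /* work[] can be freed here: no RCU grace period involved. */
    }
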
@@ -608,7 +607,7 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
 
 #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
 
-static const long nr_cpus_mask = -1;
+static const long nr_cpus_mask = -2;
 
 static
 struct ht_items_count *alloc_per_cpu_items_count(void)
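
In builds without sched_getcpu()/sysconf() the mask is a compile-time constant, and it moves from -1 to -2. The dynamic-detection path is not shown in this diff, so the following reading is an inference: -1 appears to mean "not probed yet" while -2 means "probe unavailable or failed", and the static fallback should report the latter so that the single test nr_cpus_mask > 0 (added in the next hunk) covers both builds. A sketch of that sentinel scheme:

    /* Assumed sentinel convention (the dynamic path is not in this diff):
     *   -1   CPU count not probed yet (dynamic builds only)
     *   -2   CPU count unavailable (probe failed, or no sched_getcpu())
     *   >= 0 power-of-two CPU count minus one, usable as a mask
     */
    static long nr_cpus_mask = -1;

    static void init_nr_cpus_mask(long detected_cpus)
    {
            if (detected_cpus <= 0) {
                    nr_cpus_mask = -2;      /* unavailable, not merely unprobed */
                    return;
            }
            /* round up to a power of two, keep count - 1 as the mask */
            while (detected_cpus & (detected_cpus - 1))
                    detected_cpus++;
            nr_cpus_mask = detected_cpus - 1;
    }
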
@@ -1041,8 +1040,12 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
         * We spawn just the number of threads we need to satisfy the minimum
         * partition size, up to the number of CPUs in the system.
         */
-       nr_threads = min(nr_cpus_mask + 1,
-                        len >> MIN_PARTITION_PER_THREAD_ORDER);
+       if (nr_cpus_mask > 0) {
+               nr_threads = min(nr_cpus_mask + 1,
+                                len >> MIN_PARTITION_PER_THREAD_ORDER);
+       } else {
+               nr_threads = 1;
+       }
        partition_len = len >> get_count_order_ulong(nr_threads);
        work = calloc(nr_threads, sizeof(*work));
        thread_id = calloc(nr_threads, sizeof(*thread_id));
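
The old expression could go wrong for non-positive masks: with nr_cpus_mask == -2 it computed min(-1, len >> order), a negative thread count fed straight into calloc() and the shift above. The fix clamps to a single thread whenever the CPU count is unknown. A compilable illustration, assuming a value for MIN_PARTITION_PER_THREAD_ORDER (the real constant is defined elsewhere in rculfhash.c):

    #include <stdio.h>

    /* Assumed value, for illustration only. */
    #define MIN_PARTITION_PER_THREAD_ORDER  12

    static long min_long(long a, long b)
    {
            return a < b ? a : b;
    }

    /* The fixed computation: fall back to one thread whenever the
     * CPU count is unknown (nr_cpus_mask <= 0). */
    static long compute_nr_threads(long nr_cpus_mask, unsigned long len)
    {
            if (nr_cpus_mask > 0)
                    return min_long(nr_cpus_mask + 1,
                                    len >> MIN_PARTITION_PER_THREAD_ORDER);
            return 1;
    }

    int main(void)
    {
            /* Old code: nr_cpus_mask == -2 gave min(-1, ...) == -1
             * threads, i.e. a negative calloc() count. */
            printf("%ld\n", compute_nr_threads(-2, 1UL << 16)); /* 1 */
            printf("%ld\n", compute_nr_threads(7, 1UL << 16));  /* 8 */
            return 0;
    }
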
@@ -1093,8 +1096,6 @@ void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
                        bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
                (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
                                new_node, ADD_DEFAULT, 1);
-               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
-                       break;
        }
        ht->cds_lfht_rcu_read_unlock();
 }
@@ -1196,8 +1197,6 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
                        bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
                (void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
                                fini_node, 1);
-               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
-                       break;
        }
        ht->cds_lfht_rcu_read_unlock();
 }
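
This hunk and the previous one make the same change in the two partition walkers: the mid-loop in_progress_destroy check is gone, so a partition that has started is always walked to completion, and the table can no longer be left half-populated or half-unlinked by an early break. Teardown is instead detected at coarser granularity (see the _do_cds_lfht_resize() and resize_lazy hunks below). A sketch of that cancellation-granularity pattern, in plain C11 atomics rather than urcu's CMM_* macros:

    #include <stdatomic.h>

    static atomic_int in_progress_destroy;

    /* Once started, a partition runs to completion: no destroy check
     * in the inner loop. */
    static void apply_partition(unsigned long start, unsigned long len,
                                void (*op)(unsigned long idx))
    {
            unsigned long j;

            for (j = start; j < start + len; j++)
                    op(j);          /* add or del the j-th bucket node */
    }

    /* Cancellation is polled at step granularity instead. */
    static void resize_step(unsigned long len, void (*op)(unsigned long idx))
    {
            if (atomic_load(&in_progress_destroy))
                    return;         /* table being torn down: skip the step */
            apply_partition(0, len, op);
    }
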
@@ -1395,10 +1394,7 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
        struct cds_lfht_node *node, *next;
 
-       node = iter->node;
-       next = iter->next;
-       node = clear_flag(next);
-
+       node = clear_flag(iter->next);
        for (;;) {
                if (unlikely(is_end(node))) {
                        node = next = NULL;
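
The removed sequence loaded iter->node into node and then immediately overwrote node with clear_flag(next): a dead load and a dead store. The replacement keeps the only live operation. For readers outside the file: clear_flag() strips the state bits this list steals from the low bits of next pointers, along the lines of the sketch below (the exact mask here is an assumption):

    #include <stdint.h>

    struct lf_node {
            struct lf_node *next;
    };

    /* Lists of this style mark nodes as removed or dummy by setting
     * low bits of ->next, so an iterator must mask them off before
     * dereferencing.  Two flag bits are assumed for illustration. */
    #define FLAGS_MASK      ((uintptr_t) 0x3)

    static struct lf_node *clear_flag(struct lf_node *p)
    {
            return (struct lf_node *) ((uintptr_t) p & ~FLAGS_MASK);
    }
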
@@ -1425,7 +1421,7 @@ void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
         * first node of the linked list.
         */
        lookup = &ht->t.tbl[0]->nodes[0];
-       iter->node = (struct cds_lfht_node *) lookup;
+       iter->next = lookup->next;
        cds_lfht_next(ht, iter);
 }
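
With cds_lfht_next() now consuming only iter->next, cds_lfht_first() no longer plants a fake iter->node pointing at the bucket: it seeds iter->next from the head bucket's link and reuses the common next() step, which also skips dummy and removed nodes. A toy model of that contract, with invented types and a deliberately simplified next():

    struct lf_node {
            struct lf_node *next;
    };

    struct lf_iter {
            struct lf_node *node;   /* current node, NULL at end of list */
            struct lf_node *next;   /* successor link; the only field next() reads */
    };

    /* Simplified next(): the real cds_lfht_next() also strips flag
     * bits and skips dummy nodes; here it just advances. */
    static void lf_next(struct lf_iter *iter)
    {
            iter->node = iter->next;
            iter->next = iter->node ? iter->node->next : NULL;
    }

    /* first() seeds iter->next from the head bucket and reuses the
     * common next() step, mirroring the patched cds_lfht_first(). */
    static void lf_first(struct lf_node *head, struct lf_iter *iter)
    {
            iter->next = head->next;
            lf_next(iter);
    }
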
 
@@ -1541,7 +1537,8 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
        int ret;
 
        /* Wait for in-flight resize operations to complete */
-       CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+       _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+       cmm_smp_mb();   /* Store destroy before load resize */
        while (uatomic_read(&ht->in_progress_resize))
                poll(NULL, 0, 100);     /* wait for 100ms */
        ret = cds_lfht_delete_dummy(ht);
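
The unbarriered store becomes _CMM_STORE_SHARED() plus an explicit cmm_smp_mb(), which makes the ordering requirement visible: the destroy flag must be globally ordered before the in_progress_resize load. Paired with the mirrored barrier in the last two hunks, this is a store-buffering (Dekker-style) handshake, so at least one side always observes the other. The destroy half, transliterated to C11 atomics as an illustrative model (seq_cst fences stand in for cmm_smp_mb()):

    #include <poll.h>
    #include <stdatomic.h>

    static atomic_int in_progress_destroy;
    static atomic_int in_progress_resize;

    /* Destroy side: publish the destroy flag, full fence, then wait
     * until no resize work remains in flight. */
    static void wait_resize_then_destroy(void)
    {
            atomic_store_explicit(&in_progress_destroy, 1,
                                  memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);
                    /* store destroy before load resize */
            while (atomic_load_explicit(&in_progress_resize,
                                        memory_order_relaxed))
                    poll(NULL, 0, 100);     /* wait for 100ms, as in the patch */
    }
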
@@ -1649,6 +1646,9 @@ void _do_cds_lfht_resize(struct cds_lfht *ht)
         * Resize table, re-do if the target size has changed under us.
         */
        do {
+               assert(uatomic_read(&ht->in_progress_resize));
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
                ht->t.resize_initiated = 1;
                old_size = ht->t.size;
                new_size = CMM_LOAD_SHARED(ht->t.resize_target);
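
Each pass of the resize loop now asserts that the in-flight counter is held and bails out once destroy has begun, which is where the cancellation checks removed from the partition walkers went: teardown is polled per resize pass, never mid-partition. A sketch of the loop shape:

    #include <assert.h>
    #include <stdatomic.h>

    static atomic_int in_progress_resize;
    static atomic_int in_progress_destroy;

    /* Sketch of the patched loop: accounting asserted and destroy
     * polled once per pass, so a started grow/shrink pass completes. */
    static void do_resize(unsigned long *size, unsigned long *resize_target)
    {
            unsigned long old_size, new_size;

            do {
                    assert(atomic_load(&in_progress_resize));
                    if (atomic_load(&in_progress_destroy))
                            break;
                    old_size = *size;
                    new_size = *resize_target;
                    if (old_size < new_size)
                            ;       /* grow: populate new levels to completion */
                    else if (old_size > new_size)
                            ;       /* shrink: unlink old levels to completion */
                    *size = new_size;
            } while (*size != *resize_target);
    }
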
@@ -1717,7 +1717,11 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
        cmm_smp_mb();
        if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
                uatomic_inc(&ht->in_progress_resize);
-               cmm_smp_mb();   /* increment resize count before calling it */
+               cmm_smp_mb();   /* increment resize count before load destroy */
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+                       uatomic_dec(&ht->in_progress_resize);
+                       return;
+               }
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
@@ -1740,7 +1744,11 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
        cmm_smp_mb();
        if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
                uatomic_inc(&ht->in_progress_resize);
-               cmm_smp_mb();   /* increment resize count before calling it */
+               cmm_smp_mb();   /* increment resize count before load destroy */
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+                       uatomic_dec(&ht->in_progress_resize);
+                       return;
+               }
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
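
This hunk and the previous one add the same guard to cds_lfht_resize_lazy() and cds_lfht_resize_lazy_count(): bump in_progress_resize, full barrier, then re-check in_progress_destroy, rolling the counter back instead of queuing work if teardown already started. Together with the barrier added in cds_lfht_destroy(), this closes the window where a resize could be queued after destroy's final check. The resize half of the handshake, again as a C11 model rather than urcu's actual primitives:

    #include <stdatomic.h>

    static atomic_int in_progress_destroy;
    static atomic_int in_progress_resize;

    /* Resize side: make the in-flight count visible before checking
     * for a concurrent destroy; at least one side of this
     * store/fence/load pair is guaranteed to observe the other. */
    static int try_queue_resize(void)
    {
            atomic_fetch_add_explicit(&in_progress_resize, 1,
                                      memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);
                    /* inc resize count before load destroy */
            if (atomic_load_explicit(&in_progress_destroy,
                                     memory_order_relaxed)) {
                    atomic_fetch_sub_explicit(&in_progress_resize, 1,
                                              memory_order_relaxed);
                    return -1;      /* table being destroyed: skip queuing */
            }
            /* ... allocate the work item and hand it to call_rcu ... */
            return 0;
    }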