rculfhash: remove unused rcu_head in partition_resize_work
[urcu.git] / rculfhash.c
index ae7229273740485367ac9a279e4de5ed17b08aa0..13274189887182f81a2a8ed5336db50a36e13c90 100644
--- a/rculfhash.c
+++ b/rculfhash.c
 #include <urcu-call-rcu.h>
 #include <urcu/arch.h>
 #include <urcu/uatomic.h>
-#include <urcu/jhash.h>
 #include <urcu/compiler.h>
 #include <urcu/rculfhash.h>
 #include <stdio.h>
@@ -256,7 +255,6 @@ struct rcu_resize_work {
 };
 
 struct partition_resize_work {
-       struct rcu_head head;
        struct cds_lfht *ht;
        unsigned long i, start, len;
        void (*fct)(struct cds_lfht *ht, unsigned long i,
@@ -609,7 +607,7 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
 
 #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
 
-static const long nr_cpus_mask = -1;
+static const long nr_cpus_mask = -2;
 
 static
 struct ht_items_count *alloc_per_cpu_items_count(void)
@@ -1042,8 +1040,12 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
         * We spawn just the number of threads we need to satisfy the minimum
         * partition size, up to the number of CPUs in the system.
         */
-       nr_threads = min(nr_cpus_mask + 1,
-                        len >> MIN_PARTITION_PER_THREAD_ORDER);
+       if (nr_cpus_mask > 0) {
+               nr_threads = min(nr_cpus_mask + 1,
+                                len >> MIN_PARTITION_PER_THREAD_ORDER);
+       } else {
+               nr_threads = 1;
+       }
        partition_len = len >> get_count_order_ulong(nr_threads);
        work = calloc(nr_threads, sizeof(*work));
        thread_id = calloc(nr_threads, sizeof(*thread_id));
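A hedged, self-contained illustration (not part of this diff) of the thread-count policy in the hunk above: spawn only as many threads as the minimum partition size allows, cap at the number of CPUs, and fall back to a single thread when the CPU count is unknown (nr_cpus_mask stays at -2 in the no-sched_getcpu()/sysconf() build, per the earlier hunk). The helper name is hypothetical; the two macros mirror their assumed definitions in rculfhash.c.

#define MIN_PARTITION_PER_THREAD_ORDER	12	/* assumed: 4096 buckets per thread minimum */
#define min(a, b)	((a) < (b) ? (a) : (b))

static unsigned long resize_nr_threads(long nr_cpus_mask, unsigned long len)
{
	unsigned long nr_threads;

	if (nr_cpus_mask > 0) {
		/* e.g. len = 1UL << 20 buckets, 8 CPUs: min(8, 256) = 8 threads */
		nr_threads = min(nr_cpus_mask + 1,
				 len >> MIN_PARTITION_PER_THREAD_ORDER);
	} else {
		/* CPU count could not be determined: do the work in one thread */
		nr_threads = 1;
	}
	return nr_threads;
}

partition_len is then derived from nr_threads via get_count_order_ulong(), so each spawned thread is handed a power-of-two sized slice of the bucket range.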
@@ -1094,8 +1096,6 @@ void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
                        bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
                (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
                                new_node, ADD_DEFAULT, 1);
-               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
-                       break;
        }
        ht->cds_lfht_rcu_read_unlock();
 }
@@ -1197,8 +1197,6 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
                        bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
                (void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
                                fini_node, 1);
-               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
-                       break;
        }
        ht->cds_lfht_rcu_read_unlock();
 }
@@ -1356,7 +1354,7 @@ void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
        iter->next = next;
 }
 
-void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
        struct cds_lfht_node *node, *next;
        unsigned long reverse_hash;
@@ -1392,6 +1390,41 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
        iter->next = next;
 }
 
+void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+       struct cds_lfht_node *node, *next;
+
+       node = clear_flag(iter->next);
+       for (;;) {
+               if (unlikely(is_end(node))) {
+                       node = next = NULL;
+                       break;
+               }
+               next = rcu_dereference(node->p.next);
+               if (likely(!is_removed(next))
+                   && !is_dummy(next)) {
+                               break;
+               }
+               node = clear_flag(next);
+       }
+       assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+       iter->node = node;
+       iter->next = next;
+}
+
+void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+       struct _cds_lfht_node *lookup;
+
+       /*
+        * Get next after first dummy node. The first dummy node is the
+        * first node of the linked list.
+        */
+       lookup = &ht->t.tbl[0]->nodes[0];
+       iter->next = lookup->next;
+       cds_lfht_next(ht, iter);
+}
+
 void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
 {
        unsigned long hash, size;
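The hunk above turns the old key-constrained cds_lfht_next() into cds_lfht_next_duplicate() and adds cds_lfht_first()/cds_lfht_next() for plain full-table traversal, skipping dummy and logically removed nodes. A minimal caller-side sketch (not part of this diff; the function name is hypothetical), assuming the caller holds the read-side lock of the RCU flavor the table was created with and reads iter.node directly, as the assignments above suggest:

#include <urcu.h>		/* rcu_read_lock()/unlock(); default flavor assumed */
#include <urcu/rculfhash.h>

/* Walk every node currently reachable in the table and count them. */
static unsigned long count_all_nodes(struct cds_lfht *ht)
{
	struct cds_lfht_iter iter;
	unsigned long count = 0;

	rcu_read_lock();
	for (cds_lfht_first(ht, &iter); iter.node != NULL;
			cds_lfht_next(ht, &iter))
		count++;
	rcu_read_unlock();
	return count;
}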
@@ -1504,7 +1537,8 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
        int ret;
 
        /* Wait for in-flight resize operations to complete */
-       CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+       _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+       cmm_smp_mb();   /* Store destroy before load resize */
        while (uatomic_read(&ht->in_progress_resize))
                poll(NULL, 0, 100);     /* wait for 100ms */
        ret = cds_lfht_delete_dummy(ht);
@@ -1612,6 +1646,9 @@ void _do_cds_lfht_resize(struct cds_lfht *ht)
         * Resize table, re-do if the target size has changed under us.
         */
        do {
+               assert(uatomic_read(&ht->in_progress_resize));
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
                ht->t.resize_initiated = 1;
                old_size = ht->t.size;
                new_size = CMM_LOAD_SHARED(ht->t.resize_target);
@@ -1680,7 +1717,11 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
        cmm_smp_mb();
        if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
                uatomic_inc(&ht->in_progress_resize);
-               cmm_smp_mb();   /* increment resize count before calling it */
+               cmm_smp_mb();   /* increment resize count before load destroy */
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+                       uatomic_dec(&ht->in_progress_resize);
+                       return;
+               }
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
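This hunk and the nearly identical one that follows for cds_lfht_resize_lazy_count(), paired with the cds_lfht_destroy() change earlier, turn teardown vs. lazy resize into a store/load (Dekker-style) handshake: each side publishes its own flag, issues a full barrier, then reads the other's, so at least one of them is guaranteed to notice the other. That is also consistent with dropping the per-bucket in_progress_destroy checks from init_table_populate_partition() and remove_table_partition(): _do_cds_lfht_resize() now tests the flag once at the top of each resize pass instead. A reduced sketch of the handshake (illustration only, not part of this diff; the struct and function names are hypothetical):

#include <poll.h>
#include <urcu/arch.h>		/* cmm_smp_mb() */
#include <urcu/system.h>	/* CMM_LOAD_SHARED(), CMM_STORE_SHARED() */
#include <urcu/uatomic.h>	/* uatomic_inc(), uatomic_dec(), uatomic_read() */

struct lifecycle {
	int in_progress_destroy;
	unsigned long in_progress_resize;
};

/* Resizer side: publish the resize count, then test the destroy flag. */
static int resize_may_queue(struct lifecycle *lc)
{
	uatomic_inc(&lc->in_progress_resize);
	cmm_smp_mb();	/* increment resize count before load destroy */
	if (CMM_LOAD_SHARED(lc->in_progress_destroy)) {
		uatomic_dec(&lc->in_progress_resize);
		return 0;	/* teardown started: do not queue work */
	}
	return 1;	/* safe to queue; the queued work decrements the counter when done */
}

/* Destroyer side: publish the destroy flag, then wait out resizers. */
static void destroy_wait_resizes(struct lifecycle *lc)
{
	CMM_STORE_SHARED(lc->in_progress_destroy, 1);
	cmm_smp_mb();	/* store destroy before load resize */
	while (uatomic_read(&lc->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
}

Either destroy_wait_resizes() sees a non-zero count and polls until the queued work has drained, or resize_may_queue() sees the flag and backs out before any RCU callback is queued.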
@@ -1703,7 +1744,11 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
        cmm_smp_mb();
        if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
                uatomic_inc(&ht->in_progress_resize);
-               cmm_smp_mb();   /* increment resize count before calling it */
+               cmm_smp_mb();   /* increment resize count before load destroy */
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+                       uatomic_dec(&ht->in_progress_resize);
+                       return;
+               }
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->cds_lfht_call_rcu(&work->head, do_resize_cb);