rculfhash: handle small and negative table size approximation
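
The global ht->count is only an approximation: each CPU counts its own adds and
deletes, and folds them into ht->count in batches of 1UL << COUNT_COMMIT_ORDER.
Because add batches and del batches commit independently, the global value can lag
well behind the real node count and can even go transiently negative, which is why
the hunks below switch the local count variable to a signed long and stop triggering
resizes for very small values. The following standalone sketch (illustrative names
and constants, not the urcu code) shows how the approximation can dip below zero:

    /*
     * Minimal sketch, not the urcu code: names and constants here are
     * illustrative.  It shows why a batched, split add/del counter can
     * make the global approximation go negative even though the real
     * node count never does.
     */
    #include <stdio.h>

    #define COMMIT_ORDER 4          /* commit every 16 local events */
    #define NR_CPUS      2

    struct percpu_count {
            unsigned long add;
            unsigned long del;
    };

    static struct percpu_count percpu[NR_CPUS];
    static long global_count;       /* signed: del batches may commit before add batches */

    static void count_add(int cpu)
    {
            if (!(++percpu[cpu].add & ((1UL << COMMIT_ORDER) - 1)))
                    global_count += 1L << COMMIT_ORDER;
    }

    static void count_del(int cpu)
    {
            if (!(++percpu[cpu].del & ((1UL << COMMIT_ORDER) - 1)))
                    global_count -= 1L << COMMIT_ORDER;
    }

    int main(void)
    {
            int i;

            /* 10 adds on each CPU: 20 real nodes, but neither per-CPU add
             * counter reaches a batch boundary, so nothing is committed. */
            for (i = 0; i < 10; i++) {
                    count_add(0);
                    count_add(1);
            }
            /* 16 dels on CPU 0: the del batch commits and the global
             * approximation drops to -16 while 4 real nodes remain. */
            for (i = 0; i < 16; i++)
                    count_del(0);

            printf("global approximation: %ld (real count: 4)\n", global_count);
            return 0;
    }

Compiled and run, this prints a global approximation of -16 while 4 nodes actually remain.
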
diff --git a/rculfhash.c b/rculfhash.c
index 93c2d7850fad9da9c333b47f738af08758ed62ed..9a95be260783e66aea715fa3806f0e5f0571e66f 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -556,7 +556,7 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size)
                return;
        percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
        if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
-               unsigned long count;
+               long count;
 
                dbg_printf("add percpu %lu\n", percpu_count);
                count = uatomic_add_return(&ht->count,
@@ -565,7 +565,13 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size)
                if (!(count & (count - 1))) {
                        if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
                                return;
-                       dbg_printf("add set global %lu\n", count);
+                       dbg_printf("add set global %ld\n", count);
+                       /*
+                        * Don't resize table if the number of nodes is below a
+                        * certain threshold.
+                        */
+                       if (count < (1UL << COUNT_COMMIT_ORDER))
+                               return;
                        cds_lfht_resize_lazy_count(ht, size,
                                count >> (CHAIN_LEN_TARGET - 1));
                }
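
For a concrete feel of the new early return in ht_count_add above (the ht_count_del
hunk below mirrors it): the global approximation only moves in steps of
1UL << COUNT_COMMIT_ORDER, so any value below that threshold is dominated by batching
noise. Here is a rough standalone sketch of the gate with assumed constant values;
COUNT_COMMIT_ORDER and CHAIN_LEN_RESIZE_THRESHOLD are defined in rculfhash.c, their
real values are not shown in this diff, and the types are simplified:

    /*
     * Illustration only: the resize gate from ht_count_add above, with
     * assumed constant values and simplified types.
     */
    #include <stdio.h>

    #define COUNT_COMMIT_ORDER         10   /* assumed value */
    #define CHAIN_LEN_RESIZE_THRESHOLD  3   /* assumed value */

    static void resize_decision(long count, long size)
    {
            if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size) {
                    printf("count=%ld size=%ld: chains acceptable, no resize\n",
                           count, size);
                    return;
            }
            if (count < (1L << COUNT_COMMIT_ORDER)) {
                    printf("count=%ld size=%ld: below commit granularity, skipped\n",
                           count, size);
                    return;
            }
            printf("count=%ld size=%ld: resize requested\n", count, size);
    }

    int main(void)
    {
            resize_decision(512, 64);       /* requested a resize before this patch; now skipped */
            resize_decision(4096, 64);      /* well above the threshold: resize still proceeds */
            return 0;
    }

With these assumed values, a count of 512 over 64 buckets used to request a resize but
is now dismissed as batching noise, while 4096 still grows the table.
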
@@ -585,7 +591,7 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
                return;
        percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, 1);
        if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
-               unsigned long count;
+               long count;
 
                dbg_printf("del percpu %lu\n", percpu_count);
                count = uatomic_add_return(&ht->count,
@@ -594,7 +600,13 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
                if (!(count & (count - 1))) {
                        if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
                                return;
-                       dbg_printf("del set global %lu\n", count);
+                       dbg_printf("del set global %ld\n", count);
+                       /*
+                        * Don't resize table if the number of nodes is below a
+                        * certain threshold.
+                        */
+                       if (count < (1UL << COUNT_COMMIT_ORDER))
+                               return;
                        cds_lfht_resize_lazy_count(ht, size,
                                count >> (CHAIN_LEN_TARGET - 1));
                }
@@ -1455,7 +1467,7 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
        struct _cds_lfht_node *lookup;
        unsigned long nr_dummy = 0;
 
-       *approx_before = uatomic_read(&ht->count);
+       *approx_before = 0;
        if (nr_cpus_mask >= 0) {
                int i;
 
@@ -1485,7 +1497,7 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
                node = clear_flag(next);
        } while (!is_end(node));
        dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
-       *approx_after = uatomic_read(&ht->count);
+       *approx_after = 0;
        if (nr_cpus_mask >= 0) {
                int i;
 
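
The last two hunks stop seeding *approx_before and *approx_after from the batched
ht->count. The context lines end before the loop bodies, but given the per-CPU
add/del counters used above, a natural reading is that the approximation is now
accumulated from those counters, which is also why the result can be small or
negative. A sketch of that idea, as an assumption rather than the elided code:

    /*
     * Assumed sketch, not the elided urcu code: rebuild a signed
     * approximation from split per-CPU add/del counters.  The struct
     * name and helper are hypothetical.
     */
    struct percpu_count {
            unsigned long add;
            unsigned long del;
    };

    static long approx_node_count(const struct percpu_count *percpu, int nr_cpus)
    {
            long approx = 0;
            int i;

            for (i = 0; i < nr_cpus; i++) {
                    approx += (long) percpu[i].add;
                    approx -= (long) percpu[i].del;
            }
            return approx;  /* may be stale or even negative under concurrent updates */
    }

Summing unsigned per-CPU counters into a signed total keeps the result meaningful even
when the deletes observed on one CPU outpace the adds observed there.
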