rculfhash: Simplify default logic
[urcu.git] / rculfhash.c
index 49c7863a78a0d1b965dd9023451f795eb55d282e..d04451fece7ac744cb0b894b1f2b5e3803b9785f 100644 (file)
@@ -242,15 +242,6 @@ struct partition_resize_work {
                    unsigned long start, unsigned long len);
 };
 
-static
-void _cds_lfht_add(struct cds_lfht *ht,
-               cds_lfht_match_fct match,
-               const void *key,
-               unsigned long size,
-               struct cds_lfht_node *node,
-               struct cds_lfht_iter *unique_ret,
-               int bucket);
-
 /*
  * Algorithm to reverse bits in a word by lookup table, extended to
  * 64-bit words.
@@ -536,26 +527,28 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 {
        unsigned long split_count;
        int index;
+       long count;
 
        if (caa_unlikely(!ht->split_count))
                return;
        index = ht_get_split_count_index(hash);
        split_count = uatomic_add_return(&ht->split_count[index].add, 1);
-       if (caa_unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
-               long count;
-
-               dbg_printf("add split count %lu\n", split_count);
-               count = uatomic_add_return(&ht->count,
-                                          1UL << COUNT_COMMIT_ORDER);
-               /* If power of 2 */
-               if (!(count & (count - 1))) {
-                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
-                               return;
-                       dbg_printf("add set global %ld\n", count);
-                       cds_lfht_resize_lazy_count(ht, size,
-                               count >> (CHAIN_LEN_TARGET - 1));
-               }
-       }
+       if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
+               return;
+       /* Only if the number of adds is a multiple of 1UL << COUNT_COMMIT_ORDER */
+
+       dbg_printf("add split count %lu\n", split_count);
+       count = uatomic_add_return(&ht->count,
+                                  1UL << COUNT_COMMIT_ORDER);
+       if (caa_likely(count & (count - 1)))
+               return;
+       /* Only if the global count is a power of 2 */
+
+       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
+               return;
+       dbg_printf("add set global %ld\n", count);
+       cds_lfht_resize_lazy_count(ht, size,
+               count >> (CHAIN_LEN_TARGET - 1));
 }
 
 static
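
Beyond flattening the nesting into early returns, the rewritten add path above keeps the two-level counting scheme: each split counter is bumped on every add, it triggers a commit into the global ht->count (via uatomic_add_return()) only once every 2^COUNT_COMMIT_ORDER local increments, and a resize is only evaluated when the committed global count lands on a power of two. A minimal single-threaded sketch of that batching/power-of-two pattern follows; the names (COMMIT_ORDER, count_add, global_count) are illustrative stand-ins, not the real symbols, and the real code uses atomic operations:

    #include <stdio.h>

    #define COMMIT_ORDER 10 /* stand-in for COUNT_COMMIT_ORDER */

    static long global_count;

    /* Bump a per-slot counter; fold into the global count once per 2^COMMIT_ORDER adds. */
    static void count_add(unsigned long *split_count)
    {
        unsigned long local = ++(*split_count);

        if (local & ((1UL << COMMIT_ORDER) - 1))
            return;     /* not a commit point yet */
        global_count += 1UL << COMMIT_ORDER;
        if (global_count & (global_count - 1))
            return;     /* x > 0 is a power of two iff (x & (x - 1)) == 0 */
        printf("global count reached %ld, check resize threshold\n", global_count);
    }

    int main(void)
    {
        unsigned long split = 0;

        for (int i = 0; i < (1 << 20); i++)
            count_add(&split);
        return 0;
    }
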
@@ -563,32 +556,34 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 {
        unsigned long split_count;
        int index;
+       long count;
 
        if (caa_unlikely(!ht->split_count))
                return;
        index = ht_get_split_count_index(hash);
        split_count = uatomic_add_return(&ht->split_count[index].del, 1);
-       if (caa_unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
-               long count;
-
-               dbg_printf("del split count %lu\n", split_count);
-               count = uatomic_add_return(&ht->count,
-                                          -(1UL << COUNT_COMMIT_ORDER));
-               /* If power of 2 */
-               if (!(count & (count - 1))) {
-                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
-                               return;
-                       dbg_printf("del set global %ld\n", count);
-                       /*
-                        * Don't shrink table if the number of nodes is below a
-                        * certain threshold.
-                        */
-                       if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
-                               return;
-                       cds_lfht_resize_lazy_count(ht, size,
-                               count >> (CHAIN_LEN_TARGET - 1));
-               }
-       }
+       if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
+               return;
+       /* Only if the number of deletes is a multiple of 1UL << COUNT_COMMIT_ORDER */
+
+       dbg_printf("del split count %lu\n", split_count);
+       count = uatomic_add_return(&ht->count,
+                                  -(1UL << COUNT_COMMIT_ORDER));
+       if (caa_likely(count & (count - 1)))
+               return;
+       /* Only if the global count is a power of 2 */
+
+       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
+               return;
+       dbg_printf("del set global %ld\n", count);
+       /*
+        * Don't shrink table if the number of nodes is below a
+        * certain threshold.
+        */
+       if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
+               return;
+       cds_lfht_resize_lazy_count(ht, size,
+               count >> (CHAIN_LEN_TARGET - 1));
 }
 
 static
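
The delete path mirrors the add path but keeps one extra guard: even when the committed count is a power of two and under the resize threshold, the table is never shrunk while the approximate node count sits below (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1), i.e. roughly one commit batch per split-counter slot, where counter noise would dominate. A small self-contained sketch of that floor check, with made-up constants standing in for the real ones, might be:

    #include <stdio.h>

    /* Illustrative stand-ins for COUNT_COMMIT_ORDER and split_count_mask + 1. */
    #define COMMIT_ORDER        10
    #define NR_SPLIT_COUNTERS   4UL

    /* Nonzero when the approximate node count is high enough to allow shrinking. */
    static int shrink_allowed(long count)
    {
        return count >= (long)((1UL << COMMIT_ORDER) * NR_SPLIT_COUNTERS);
    }

    int main(void)
    {
        printf("count 2048: %d\n", shrink_allowed(2048)); /* below floor: 0 */
        printf("count 8192: %d\n", shrink_allowed(8192)); /* above floor: 1 */
        return 0;
    }
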
@@ -1154,6 +1149,11 @@ void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
        partition_resize_helper(ht, i, len, remove_table_partition);
 }
 
+/*
+ * fini_table() is never called for first_order == 0, which is why
+ * free_by_rcu_order == 0 can be used as criterion to know if free must
+ * be called.
+ */
 static
 void fini_table(struct cds_lfht *ht,
                unsigned long first_order, unsigned long last_order)
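
The new comment above documents a sentinel convention: since fini_table() never runs for first_order == 0, a free_by_rcu_order value of 0 can safely mean "no bucket table pending free". A simplified, self-contained sketch of that pattern follows; free_bucket_table() and fini_orders() are made-up stand-ins, not the real helpers, and the RCU grace-period handling is omitted:

    #include <stdio.h>

    /* Stub standing in for the deferred free of one order's bucket table. */
    static void free_bucket_table(unsigned long order)
    {
        printf("free bucket table of order %lu\n", order);
    }

    /* Order 0 is never passed in, so free_by_rcu_order == 0 means "nothing pending". */
    static void fini_orders(unsigned long first_order, unsigned long last_order)
    {
        unsigned long free_by_rcu_order = 0, order;

        for (order = last_order; order >= first_order; order--) {
            if (free_by_rcu_order)
                free_bucket_table(free_by_rcu_order);
            /* unlink this order's buckets, then defer its free */
            free_by_rcu_order = order;
        }
        if (free_by_rcu_order)
            free_bucket_table(free_by_rcu_order);
    }

    int main(void)
    {
        fini_orders(3, 6); /* first_order > 0, per the comment above */
        return 0;
    }
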
@@ -1271,6 +1271,30 @@ struct cds_lfht *_cds_lfht_new(unsigned long init_size,
        if (!init_size || (init_size & (init_size - 1)))
                return NULL;
 
+       /*
+        * Memory management plugin default.
+        */
+       if (!mm) {
+               if (CAA_BITS_PER_LONG > 32
+                               && max_nr_buckets
+                               && max_nr_buckets <= (1ULL << 32)) {
+                       /*
+                        * For 64-bit architectures, with max number of
+                        * buckets small enough not to use the entire
+                        * 64-bit memory mapping space (and allowing a
+                        * fair number of hash table instances), use the
+                        * mmap allocator, which is faster than the
+                        * order allocator.
+                        */
+                       mm = &cds_lfht_mm_mmap;
+               } else {
+                       /*
+                        * The fallback is to use the order allocator.
+                        */
+                       mm = &cds_lfht_mm_order;
+               }
+       }
+
        /* max_nr_buckets == 0 for order based mm means infinite */
        if (mm == &cds_lfht_mm_order && !max_nr_buckets)
                max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);
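
With the plugin default now resolved inside _cds_lfht_new(), a caller that does not name a memory-management plugin gets cds_lfht_mm_mmap on 64-bit when max_nr_buckets is nonzero and fits under 2^32, and cds_lfht_mm_order otherwise. A hedged usage sketch, assuming the public cds_lfht_new() wrapper is the path that leaves the plugin unspecified, could be:

    #include <urcu.h>            /* pick an RCU flavor before rculfhash.h */
    #include <urcu/rculfhash.h>

    int main(void)
    {
        struct cds_lfht *ht;

        rcu_register_thread();
        /*
         * No mm plugin is named here; with this commit the default is
         * chosen in _cds_lfht_new(): cds_lfht_mm_mmap on 64-bit since
         * max_nr_buckets (1UL << 20) is below 2^32, cds_lfht_mm_order
         * otherwise.
         */
        ht = cds_lfht_new(1024, 1024, 1UL << 20,
                  CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
        if (!ht) {
            rcu_unregister_thread();
            return 1;
        }
        /* ... cds_lfht_add()/cds_lfht_lookup() under rcu_read_lock() ... */
        cds_lfht_destroy(ht, NULL);
        rcu_unregister_thread();
        return 0;
    }

Built against liburcu and liburcu-cds, the sketch relies only on public calls; which allocator actually backs the buckets remains an internal detail of the library.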