rculfhash,test: add memory_backend argument
diff --git a/rculfhash.c b/rculfhash.c
index 49c7863a78a0d1b965dd9023451f795eb55d282e..6c648f17da8a939d22894067eb1cc8c24620a8e0 100644
@@ -242,15 +242,6 @@ struct partition_resize_work {
                    unsigned long start, unsigned long len);
 };
 
-static
-void _cds_lfht_add(struct cds_lfht *ht,
-               cds_lfht_match_fct match,
-               const void *key,
-               unsigned long size,
-               struct cds_lfht_node *node,
-               struct cds_lfht_iter *unique_ret,
-               int bucket);
-
 /*
  * Algorithm to reverse bits in a word by lookup table, extended to
  * 64-bit words.
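Note: the comment above refers to lookup-table bit reversal. A minimal
stand-alone sketch of that technique follows; it is illustrative, not the
table-driven code in rculfhash.c itself, and reverse_byte stands in for a
256-entry lookup table.

#include <stdint.h>

/* Reverse the bits of one byte (a real implementation would index a
 * precomputed 256-entry table instead). */
static uint8_t reverse_byte(uint8_t b)
{
        b = (uint8_t)(((b & 0xF0) >> 4) | ((b & 0x0F) << 4));
        b = (uint8_t)(((b & 0xCC) >> 2) | ((b & 0x33) << 2));
        b = (uint8_t)(((b & 0xAA) >> 1) | ((b & 0x55) << 1));
        return b;
}

/* Extend byte reversal to a 64-bit word: reverse each byte's bits,
 * then place the byte at the mirrored byte position. */
static uint64_t reverse_u64(uint64_t v)
{
        uint64_t r = 0;
        int i;

        for (i = 0; i < 8; i++)
                r |= (uint64_t)reverse_byte((uint8_t)(v >> (i * 8)))
                        << ((7 - i) * 8);
        return r;
}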
@@ -536,26 +527,28 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 {
        unsigned long split_count;
        int index;
+       long count;
 
        if (caa_unlikely(!ht->split_count))
                return;
        index = ht_get_split_count_index(hash);
        split_count = uatomic_add_return(&ht->split_count[index].add, 1);
-       if (caa_unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
-               long count;
-
-               dbg_printf("add split count %lu\n", split_count);
-               count = uatomic_add_return(&ht->count,
-                                          1UL << COUNT_COMMIT_ORDER);
-               /* If power of 2 */
-               if (!(count & (count - 1))) {
-                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
-                               return;
-                       dbg_printf("add set global %ld\n", count);
-                       cds_lfht_resize_lazy_count(ht, size,
-                               count >> (CHAIN_LEN_TARGET - 1));
-               }
-       }
+       if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
+               return;
+       /* Only if the number of adds is a multiple of 1UL << COUNT_COMMIT_ORDER */
+
+       dbg_printf("add split count %lu\n", split_count);
+       count = uatomic_add_return(&ht->count,
+                                  1UL << COUNT_COMMIT_ORDER);
+       if (caa_likely(count & (count - 1)))
+               return;
+       /* Only if the global count is a power of 2 */
+
+       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
+               return;
+       dbg_printf("add set global %ld\n", count);
+       cds_lfht_resize_lazy_count(ht, size,
+               count >> (CHAIN_LEN_TARGET - 1));
 }
 
 static
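Note: the rewritten ht_count_add keeps the classic x & (x - 1) test. The
expression clears the lowest set bit, so it is zero exactly when at most one
bit is set, which rate-limits resize decisions to power-of-two global counts.
A minimal stand-alone sketch (illustrative names, not the file's code):

#include <stdio.h>

/* x & (x - 1) clears the lowest set bit; the result is 0 exactly when
 * x has at most one bit set, i.e. x is 0 or a power of two. */
static int is_power_of_two(unsigned long x)
{
        return x && !(x & (x - 1));
}

int main(void)
{
        unsigned long v;

        for (v = 1; v <= 16; v++)
                printf("%lu -> %d\n", v, is_power_of_two(v));
        return 0;
}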
@@ -563,32 +556,34 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 {
        unsigned long split_count;
        int index;
+       long count;
 
        if (caa_unlikely(!ht->split_count))
                return;
        index = ht_get_split_count_index(hash);
        split_count = uatomic_add_return(&ht->split_count[index].del, 1);
-       if (caa_unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
-               long count;
-
-               dbg_printf("del split count %lu\n", split_count);
-               count = uatomic_add_return(&ht->count,
-                                          -(1UL << COUNT_COMMIT_ORDER));
-               /* If power of 2 */
-               if (!(count & (count - 1))) {
-                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
-                               return;
-                       dbg_printf("del set global %ld\n", count);
-                       /*
-                        * Don't shrink table if the number of nodes is below a
-                        * certain threshold.
-                        */
-                       if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
-                               return;
-                       cds_lfht_resize_lazy_count(ht, size,
-                               count >> (CHAIN_LEN_TARGET - 1));
-               }
-       }
+       if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
+               return;
+       /* Only if the number of deletes is a multiple of 1UL << COUNT_COMMIT_ORDER */
+
+       dbg_printf("del split count %lu\n", split_count);
+       count = uatomic_add_return(&ht->count,
+                                  -(1UL << COUNT_COMMIT_ORDER));
+       if (caa_likely(count & (count - 1)))
+               return;
+       /* Only if the global count is a power of 2 */
+
+       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
+               return;
+       dbg_printf("del set global %ld\n", count);
+       /*
+        * Don't shrink table if the number of nodes is below a
+        * certain threshold.
+        */
+       if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
+               return;
+       cds_lfht_resize_lazy_count(ht, size,
+               count >> (CHAIN_LEN_TARGET - 1));
 }
 
 static
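Note: both ht_count_add and ht_count_del follow the split-counter pattern:
per-slot counters absorb most updates, and the shared global count is only
touched once per 2^COUNT_COMMIT_ORDER operations on a slot. A minimal C11
sketch of the idea; COMMIT_ORDER, NR_SLOTS and count_add are illustrative
names, and C11 atomics stand in for the file's uatomic API:

#include <stdatomic.h>

#define COMMIT_ORDER    10      /* commit every 2^10 local increments */
#define NR_SLOTS        16      /* assumed number of split-counter slots */

struct split_counter {
        _Atomic unsigned long add;
};

static struct split_counter slots[NR_SLOTS];
static _Atomic long global_count;

/* The hash-selected slot absorbs most increments; the contended global
 * counter is only updated once per 2^COMMIT_ORDER local adds. */
static void count_add(unsigned long hash)
{
        unsigned long c;

        c = atomic_fetch_add(&slots[hash % NR_SLOTS].add, 1) + 1;
        if (c & ((1UL << COMMIT_ORDER) - 1))
                return; /* not yet at a commit boundary */
        atomic_fetch_add(&global_count, 1L << COMMIT_ORDER);
}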
@@ -1154,6 +1149,11 @@ void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
        partition_resize_helper(ht, i, len, remove_table_partition);
 }
 
+/*
+ * fini_table() is never called for first_order == 0, which is why
+ * free_by_rcu_order == 0 can be used as criterion to know if free must
+ * be called.
+ */
 static
 void fini_table(struct cds_lfht *ht,
                unsigned long first_order, unsigned long last_order)
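Note: a sketch of the sentinel pattern the new comment relies on. Because
order 0 is never passed to fini_table, a zero free_by_rcu_order can double
as "nothing pending". The names below are illustrative, not the file's:

static unsigned long free_by_rcu_order;

/* Hypothetical helper: would free the bucket array for "order" after
 * an RCU grace period. */
static void free_level(unsigned long order)
{
        (void) order;
}

static void fini_one_level(unsigned long order)
{
        if (free_by_rcu_order)          /* 0 means: nothing queued yet */
                free_level(free_by_rcu_order);
        free_by_rcu_order = order;      /* defer freeing this level */
}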