rcuja: disable node accounting by default
author		Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
		Thu, 13 Jun 2013 15:35:13 +0000 (11:35 -0400)
committer	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
		Thu, 13 Jun 2013 15:35:13 +0000 (11:35 -0400)
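Only account nodes when DEBUG_COUNTERS is defined in rcuja-internal.h: ja_debug_counters() returns a compile-time constant, so in the default build the compiler can drop the uatomic_inc() updates of nr_nodes_allocated, nr_nodes_freed, nr_fallback and node_fallback_count_distribution, and ja_final_checks() returns early without verifying them.

As an illustration only (not part of this patch), a minimal standalone sketch of the same compile-time guard pattern; the debug_counters_enabled(), count_malloc() and nr_allocations names are hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* #define DEBUG_COUNTERS */	/* uncomment to enable accounting */

#ifdef DEBUG_COUNTERS
static inline int debug_counters_enabled(void) { return 1; }
#else
static inline int debug_counters_enabled(void) { return 0; }
#endif

static unsigned long nr_allocations;	/* hypothetical debug counter */

static void *count_malloc(size_t len)
{
	void *p = malloc(len);

	/* Constant predicate: dead code when DEBUG_COUNTERS is not defined. */
	if (debug_counters_enabled() && p)
		nr_allocations++;
	return p;
}

int main(void)
{
	free(count_malloc(16));
	printf("allocations counted: %lu\n", nr_allocations);
	return 0;
}

With optimization enabled, the constant-folded predicate lets the compiler remove the increment entirely, so the default build pays no accounting cost while callers keep a single code path.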
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
rcuja/rcuja-internal.h
rcuja/rcuja.c

index 8afe0a436c521b6983dc14e5cc64ad21aacfe60c..18ad288a4a4973c9336093adb62bfd84e55715d1 100644 (file)
@@ -223,6 +223,7 @@ void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node);
        for (; (pos) != NULL; (pos) = (pos)->next)
 
 //#define DEBUG
+//#define DEBUG_COUNTERS
 
 #ifdef __linux__
 #include <syscall.h>
@@ -248,6 +249,7 @@ static inline pid_t gettid(void)
        fprintf(stderr, "[debug rcuja %lu %s()@%s:%u] " fmt,    \
                (unsigned long) gettid(), __func__,             \
                __FILE__, __LINE__, ## args)
+
 #else
 #define dbg_printf(fmt, args...)                               \
 do {                                                           \
@@ -259,4 +261,18 @@ do {                                                               \
 } while (0)
 #endif
 
+#ifdef DEBUG_COUNTERS
+static inline
+int ja_debug_counters(void)
+{
+       return 1;
+}
+#else
+static inline
+int ja_debug_counters(void)
+{
+       return 0;
+}
+#endif
+
 #endif /* _URCU_RCUJA_INTERNAL_H */
index a3acfbceb7dffebd2ce358a90094a1b41c962027..3a67ef67497f9d3bc2902d9d1097d73d857ff5c5 100644 (file)
@@ -290,14 +290,15 @@ struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
                return NULL;
        }
        memset(p, 0, len);
-       uatomic_inc(&ja->nr_nodes_allocated);
+       if (ja_debug_counters())
+               uatomic_inc(&ja->nr_nodes_allocated);
        return p;
 }
 
 void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
 {
        free(node);
-       if (node)
+       if (ja_debug_counters() && node)
                uatomic_inc(&ja->nr_nodes_freed);
 }
 
@@ -1605,7 +1606,8 @@ skip_copy:
                dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
                        new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
                                (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
-               uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
+               if (ja_debug_counters())
+                       uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
        }
 
        /* Return pointer to new recompacted node through old_node_flag_ptr */
@@ -1671,7 +1673,8 @@ fallback_toosmall:
                } else {
                        new_type_index++;
                        dbg_printf("Add fallback to type %d\n", new_type_index);
-                       uatomic_inc(&ja->nr_fallback);
+                       if (ja_debug_counters())
+                               uatomic_inc(&ja->nr_fallback);
                        fallback = 1;
                        goto retry;
                }
@@ -2698,6 +2701,9 @@ int ja_final_checks(struct cds_ja *ja)
        unsigned long na, nf, nr_fallback;
        int ret = 0;
 
+       if (!ja_debug_counters())
+               return 0;
+
        fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
        fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
        nr_fallback = uatomic_read(&ja->nr_fallback);