rculfhash: poison memory before free (for testing)
diff --git a/rculfhash.c b/rculfhash.c
index f6c901bb0d4f242ecfa5b8bce0077313bee56f54..ee4bea24c11340e5253612c51fad50c891020622 100644
--- a/rculfhash.c
+++ b/rculfhash.c
 #define dbg_printf(fmt, args...)
 #endif
 
+/* For testing */
+#define POISON_FREE
+
 /*
  * Per-CPU split-counters lazily update the global counter every 1024
  * additions/removals, and automatically keep track of when a resize
  * is required.
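
A minimal sketch of the split-counter idea described in that comment, assuming per-thread deltas flushed to a shared counter every 1024 operations (the real code uses per-CPU buckets and liburcu's uatomic primitives; all names below are illustrative):

    #include <stdatomic.h>

    #define COUNT_COMMIT_ORDER      10      /* flush every 2^10 = 1024 ops */

    static atomic_long global_count;
    static _Thread_local long local_delta;
    static _Thread_local unsigned long local_ops;

    static void split_count_add(long v)
    {
            local_delta += v;
            /* Only every 1024th operation touches the shared cache line. */
            if (++local_ops & ((1UL << COUNT_COMMIT_ORDER) - 1))
                    return;
            atomic_fetch_add_explicit(&global_count, local_delta,
                                      memory_order_relaxed);
            local_delta = 0;
    }
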
@@ -418,6 +421,16 @@ int get_count_order_ulong(unsigned long x)
        return order;
 }
 
+#ifdef POISON_FREE
+#define poison_free(ptr)                               \
+       do {                                            \
+               memset(ptr, 0x42, sizeof(*(ptr)));      \
+               free(ptr);                              \
+       } while (0)
+#else
+#define poison_free(ptr)       free(ptr)
+#endif
+
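
The macro fills freed objects with a recognizable 0x42 pattern, so a use-after-free reads obviously-poisoned bytes instead of plausible stale data. A standalone sketch of how it is used, reusing the macro from the patch verbatim (the struct and main() are illustrative, not part of the patch):

    #include <stdlib.h>
    #include <string.h>

    #define poison_free(ptr)                                \
            do {                                            \
                    memset(ptr, 0x42, sizeof(*(ptr)));      \
                    free(ptr);                              \
            } while (0)

    struct node {
            struct node *next;
            long key;
    };

    int main(void)
    {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                    return 1;
            n->key = 1;
            n->next = NULL;
            /*
             * After this call, any stale pointer to *n sees 0x4242...
             * instead of the old contents. Note that sizeof(*(ptr))
             * poisons only the pointed-to type, so variable-length
             * allocations are only partially poisoned.
             */
            poison_free(n);
            return 0;
    }
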
 static
 void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
 
@@ -468,7 +481,7 @@ struct ht_items_count *alloc_per_cpu_items_count(void)
 static
 void free_per_cpu_items_count(struct ht_items_count *count)
 {
-       free(count);
+       poison_free(count);
 }
 
 static
@@ -644,7 +657,7 @@ void cds_lfht_free_table_cb(struct rcu_head *head)
 {
        struct rcu_table *t =
                caa_container_of(head, struct rcu_table, head);
-       free(t);
+       poison_free(t);
 }
 
 static
@@ -652,7 +665,7 @@ void cds_lfht_free_level(struct rcu_head *head)
 {
        struct rcu_level *l =
                caa_container_of(head, struct rcu_level, head);
-       free(l);
+       poison_free(l);
 }
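
Both callbacks above follow liburcu's deferred-reclamation pattern: the object embeds a struct rcu_head, call_rcu() queues it, and once a grace period has elapsed the callback recovers the enclosing object with caa_container_of() and frees (now: poisons and frees) it. A self-contained sketch of that pattern, with illustrative type and function names:

    #include <stdlib.h>
    #include <string.h>
    #include <urcu.h>               /* call_rcu(), struct rcu_head */
    #include <urcu/compiler.h>      /* caa_container_of() */

    struct level {
            struct rcu_head head;   /* embedded reclamation hook */
            unsigned long len;
    };

    static void level_free_cb(struct rcu_head *head)
    {
            struct level *l = caa_container_of(head, struct level, head);

            /* Grace period elapsed: no reader still holds a reference. */
            memset(l, 0x42, sizeof(*l));    /* poison, as in the patch */
            free(l);
    }

    static void level_retire(struct level *l)
    {
            /* Defer reclamation until pre-existing RCU readers finish. */
            call_rcu(&l->head, level_free_cb);
    }
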
 
 /*
@@ -672,6 +685,13 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+               /*
+                * We should never be called with dummy (start of chain)
+                * and logically removed node (end of path compression
+                * marker) being the very same node: that would be a bug
+                * in the algorithm implementation.
+                */
+               assert(dummy != node);
                for (;;) {
                        if (unlikely(!clear_flag(iter)))
                                return;
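
The "logically removed" state the new assertion refers to is encoded by tagging a low bit of a node's next pointer, so removal is a single flag-set that traversals like the loop above can detect. A rough sketch of that tagging scheme (the flag value and helper names are illustrative; the real code also reserves a bit for dummy nodes):

    #include <assert.h>
    #include <stdint.h>

    #define REMOVED_FLAG    (1UL << 0)      /* illustrative flag bit */

    struct lf_node {
            struct lf_node *next;
    };

    static struct lf_node *flag_removed(struct lf_node *p)
    {
            return (struct lf_node *)((uintptr_t)p | REMOVED_FLAG);
    }

    static struct lf_node *clear_flag(struct lf_node *p)
    {
            return (struct lf_node *)((uintptr_t)p & ~REMOVED_FLAG);
    }

    static int is_removed(struct lf_node *p)
    {
            return !!((uintptr_t)p & REMOVED_FLAG);
    }

    int main(void)
    {
            struct lf_node a, b = { .next = NULL };

            /* Logical removal sets the flag; physical unlink is done by gc. */
            a.next = flag_removed(&b);
            assert(is_removed(a.next));
            assert(clear_flag(a.next) == &b);
            return 0;
    }
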
@@ -895,7 +915,7 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
                 * removal so gc lookups use non-logically-removed dummy
                 * nodes.
                 */
-               t->size = 1UL << (i - 2);
+               t->size = 1UL << (i - 1);
                /* Unlink */
                for (j = 0; j < len; j++) {
                        struct cds_lfht_node *fini_node =
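
The off-by-one fix matters because of the split-ordered layout: assuming, as in rculfhash, that tbl[0] holds one dummy bucket and tbl[i] (for i >= 1) holds the 2^(i-1) buckets with indices [2^(i-1), 2^i), lookups during the unlink of order i must be confined to the orders below it, i.e. a size of 2^(i-1), not 2^(i-2). A small arithmetic check under that assumption:

    #include <stdio.h>

    int main(void)
    {
            unsigned long i = 5;    /* example: tearing down order 5 */
            unsigned long len = 1UL << (i - 1);

            /* Order i covers bucket indices [2^(i-1), 2^i). */
            printf("unlinking %lu buckets: [%lu, %lu)\n",
                   len, 1UL << (i - 1), 1UL << i);
            /* Lookups during gc must stop just below that range. */
            printf("t->size during unlink: %lu\n", 1UL << (i - 1));
            return 0;
    }
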
@@ -1094,7 +1114,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
                                bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
                        assert(is_dummy(t->tbl[order]->nodes[i].next));
                }
-               free(t->tbl[order]);
+               poison_free(t->tbl[order]);
        }
        return 0;
 }
@@ -1114,9 +1134,9 @@ int cds_lfht_destroy(struct cds_lfht *ht)
        ret = cds_lfht_delete_dummy(ht);
        if (ret)
                return ret;
-       free(ht->t);
+       poison_free(ht->t);
        free_per_cpu_items_count(ht->percpu_count);
-       free(ht);
+       poison_free(ht);
        return ret;
 }
 
@@ -1268,7 +1288,7 @@ void do_resize_cb(struct rcu_head *head)
        pthread_mutex_lock(&ht->resize_mutex);
        _do_cds_lfht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
-       free(work);
+       poison_free(work);
        cmm_smp_mb();   /* finish resize before decrement */
        uatomic_dec(&ht->in_progress_resize);
 }
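
The barrier-before-decrement ordering at the end of do_resize_cb() is what lets a waiter treat in_progress_resize reaching zero as proof that all resize work is visible. A sketch of the same pattern using C11 release/acquire atomics in place of liburcu's cmm_smp_mb()/uatomic primitives (function names are illustrative):

    #include <stdatomic.h>
    #include <sched.h>

    static atomic_uint in_progress_resize;

    static void resize_work_done(void)
    {
            /* Release: the resize's stores happen-before the decrement
             * becomes visible to other threads. */
            atomic_fetch_sub_explicit(&in_progress_resize, 1,
                                      memory_order_release);
    }

    static void wait_for_resizes(void)
    {
            /* Acquire: a waiter that observes zero also observes the
             * completed resize work. */
            while (atomic_load_explicit(&in_progress_resize,
                                        memory_order_acquire))
                    sched_yield();
    }
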