rculfhash: add list iteration tracer in gc
diff --git a/rculfhash.c b/rculfhash.c
index f6c901bb0d4f242ecfa5b8bce0077313bee56f54..6a80049ddd1f783a21e117d63437eec5771ba514 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -156,6 +156,9 @@
 #define dbg_printf(fmt, args...)
 #endif
 
+/* For testing */
+#define POISON_FREE
+
 /*
  * Per-CPU split-counters lazily update the global counter each 1024
  * addition/removal. It automatically keeps track of resize required.
@@ -418,6 +421,16 @@ int get_count_order_ulong(unsigned long x)
 	return order;
 }
 
+#ifdef POISON_FREE
+#define poison_free(ptr)				\
+	do {						\
+		memset(ptr, 0x42, sizeof(*(ptr)));	\
+		free(ptr);				\
+	} while (0)
+#else
+#define poison_free(ptr)	free(ptr)
+#endif
+
 static
 void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
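[Editorial aside, not part of the patch.] Overwriting an object with a recognizable pattern such as 0x42 before freeing it turns silent use-after-free reads into obviously bogus values that assertions and crashes expose quickly. One caveat: sizeof(*(ptr)) only covers the static size of the pointed-to type, so any trailing flexible-array storage (such as the nodes[] array accessed via t->tbl[order]->nodes[i] later in this diff) is not poisoned. A minimal self-contained sketch of the same idiom, with hypothetical names:

#include <stdlib.h>
#include <string.h>

/* Same idiom as the patch: poison the object before handing it back
 * to the allocator. sizeof(*(ptr)) requires ptr to have a complete,
 * non-void pointee type. */
#define poison_free(ptr)				\
	do {						\
		memset(ptr, 0x42, sizeof(*(ptr)));	\
		free(ptr);				\
	} while (0)

struct item {
	long value;
};

int main(void)
{
	struct item *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->value = 1234;
	poison_free(p);
	/* A buggy dereference of p after this point would read the
	 * 0x4242... poison pattern instead of the stale 1234, making
	 * the use-after-free visible. */
	return 0;
}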
@@ -468,7 +481,7 @@ struct ht_items_count *alloc_per_cpu_items_count(void)
 static
 void free_per_cpu_items_count(struct ht_items_count *count)
 {
-	free(count);
+	poison_free(count);
 }
 
 static
@@ -644,7 +657,7 @@ void cds_lfht_free_table_cb(struct rcu_head *head)
 {
 	struct rcu_table *t =
 		caa_container_of(head, struct rcu_table, head);
-	free(t);
+	poison_free(t);
 }
 
 static
@@ -652,7 +665,7 @@ void cds_lfht_free_level(struct rcu_head *head)
 {
 	struct rcu_level *l =
 		caa_container_of(head, struct rcu_level, head);
-	free(l);
+	poison_free(l);
 }
 
 /*
@@ -662,16 +675,28 @@ static
 void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
 {
 	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
+	struct cds_lfht_node *iter_trace[64];
+	unsigned long trace_idx = 0;
 
+	memset(iter_trace, 0, sizeof(iter_trace));
 	assert(!is_dummy(dummy));
 	assert(!is_removed(dummy));
 	assert(!is_dummy(node));
 	assert(!is_removed(node));
 	for (;;) {
+		iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x1;
 		iter_prev = dummy;
 		/* We can always skip the dummy node initially */
 		iter = rcu_dereference(iter_prev->p.next);
+		iter_trace[trace_idx++ & (64 - 1)] = iter;
 		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+		/*
+		 * We should never be called with dummy (start of chain)
+		 * and logically removed node (end of path compression
+		 * marker) being the actual same node. This would be a
+		 * bug in the algorithm implementation.
+		 */
+		assert(dummy != node);
 		for (;;) {
 			if (unlikely(!clear_flag(iter)))
 				return;
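[Editorial aside on the tracer; the sketch below uses hypothetical names.] trace_idx grows without bound, and masking with (64 - 1) reduces it modulo 64, so iter_trace behaves as a ring buffer holding the 64 most recent steps of the gc walk. The sentinel (void *) 0x1 marks each restart of the outer loop; since malloc'd node pointers are at least word-aligned, such small constants can never collide with a real cds_lfht_node address. A standalone illustration of the masked ring-buffer idiom:

#include <stdio.h>

#define TRACE_LEN 64	/* must be a power of two for the mask below */

static void *trace_buf[TRACE_LEN];
static unsigned long trace_idx;

/* idx & (TRACE_LEN - 1) equals idx % TRACE_LEN when TRACE_LEN is a
 * power of two, so each store overwrites the oldest entry. */
static void trace_event(void *ev)
{
	trace_buf[trace_idx++ & (TRACE_LEN - 1)] = ev;
}

int main(void)
{
	unsigned long i;

	for (i = 1; i <= 100; i++)
		trace_event((void *) i);
	/* After a crash, a debugger can dump trace_buf[]; the newest
	 * entry sits at (trace_idx - 1) & (TRACE_LEN - 1). */
	printf("newest slot: %lu, value: %p\n",
	       (trace_idx - 1) & (TRACE_LEN - 1),
	       trace_buf[(trace_idx - 1) & (TRACE_LEN - 1)]);
	return 0;
}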
@@ -682,6 +707,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 				break;
 			iter_prev = clear_flag(iter);
 			iter = next;
+			iter_trace[trace_idx++ & (64 - 1)] = iter;
 		}
 		assert(!is_removed(iter));
 		if (is_dummy(iter))
@@ -689,6 +715,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 		else
 			new_next = clear_flag(next);
 		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+		iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x2;
 	}
 }
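[Editorial aside.] The (void) uatomic_cmpxchg(...) above is the lock-free publication step: it tries to swing the predecessor's next pointer past the logically removed node, and the trailing 0x2 trace entry records each attempt. If a concurrent updater changed iter_prev->p.next in the meantime, the exchange has no effect and the outer loop retries from the dummy head. A simplified, self-contained sketch of that compare-and-swap retry shape using C11 atomics (hypothetical types, not the liburcu API):

#include <stdatomic.h>
#include <stdio.h>

struct node {
	_Atomic(struct node *) next;
	int key;
};

/* Try to make prev point past victim. Returns 1 on success; returns 0
 * if prev->next no longer equals victim (a concurrent update won the
 * race), in which case the caller must restart its walk, just like
 * the outer for (;;) loop in _cds_lfht_gc_bucket. */
static int try_unlink(struct node *prev, struct node *victim)
{
	struct node *expected = victim;
	struct node *after = atomic_load(&victim->next);

	return atomic_compare_exchange_strong(&prev->next, &expected, after);
}

int main(void)
{
	struct node a, b, c;

	a.key = 1;
	b.key = 2;
	c.key = 3;
	atomic_init(&a.next, &b);
	atomic_init(&b.next, &c);
	atomic_init(&c.next, NULL);
	printf("unlink b: %d, a->next->key: %d\n",
	       try_unlink(&a, &b), atomic_load(&a.next)->key);
	return 0;
}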
@@ -895,7 +922,7 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
 	 * removal so gc lookups use non-logically-removed dummy
 	 * nodes.
 	 */
-	t->size = 1UL << (i - 2);
+	t->size = 1UL << (i - 1);
 	/* Unlink */
 	for (j = 0; j < len; j++) {
 		struct cds_lfht_node *fini_node =
@@ -1094,7 +1121,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
 				bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
 			assert(is_dummy(t->tbl[order]->nodes[i].next));
 		}
-		free(t->tbl[order]);
+		poison_free(t->tbl[order]);
 	}
 	return 0;
 }
@@ -1114,9 +1141,9 @@ int cds_lfht_destroy(struct cds_lfht *ht)
 	ret = cds_lfht_delete_dummy(ht);
 	if (ret)
 		return ret;
-	free(ht->t);
+	poison_free(ht->t);
 	free_per_cpu_items_count(ht->percpu_count);
-	free(ht);
+	poison_free(ht);
 	return ret;
 }
@@ -1268,7 +1295,7 @@ void do_resize_cb(struct rcu_head *head)
 	pthread_mutex_lock(&ht->resize_mutex);
 	_do_cds_lfht_resize(ht);
 	pthread_mutex_unlock(&ht->resize_mutex);
-	free(work);
+	poison_free(work);
 	cmm_smp_mb();	/* finish resize before decrement */
 	uatomic_dec(&ht->in_progress_resize);
 }