X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=4080e8e54b9ee87a6cbccb93fbea49cd0901b3b3;hp=ee4bea24c11340e5253612c51fad50c891020622;hb=3a22f1dd9300d2bbe621e423dfdeae3a35aa36d7;hpb=98808fb1581997fc7c9ff1bd6a486ceb27932be1

diff --git a/rculfhash.c b/rculfhash.c
index ee4bea2..4080e8e 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -156,9 +156,6 @@
 #define dbg_printf(fmt, args...)
 #endif
 
-/* For testing */
-#define POISON_FREE
-
 /*
  * Per-CPU split-counters lazily update the global counter each 1024
  * addition/removal. It automatically keeps track of resize required.
@@ -220,6 +217,8 @@ struct cds_lfht {
 	void (*cds_lfht_call_rcu)(struct rcu_head *head,
 		      void (*func)(struct rcu_head *head));
 	void (*cds_lfht_synchronize_rcu)(void);
+	void (*cds_lfht_rcu_read_lock)(void);
+	void (*cds_lfht_rcu_read_unlock)(void);
 	unsigned long count;		/* global approximate item count */
 	struct ht_items_count *percpu_count;	/* per-cpu item count */
 };
@@ -855,6 +854,11 @@ end:
 	return -ENOENT;
 }
 
+/*
+ * Holding RCU read lock to protect _cds_lfht_add against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ */
 static
 void init_table(struct cds_lfht *ht, struct rcu_table *t,
 		unsigned long first_order, unsigned long len_order)
@@ -872,6 +876,7 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t,
 		dbg_printf("init order %lu len: %lu\n", i, len);
 		t->tbl[i] = calloc(1, sizeof(struct rcu_level)
 				+ (len * sizeof(struct _cds_lfht_node)));
+		ht->cds_lfht_rcu_read_lock();
 		for (j = 0; j < len; j++) {
 			struct cds_lfht_node *new_node =
 				(struct cds_lfht_node *) &t->tbl[i]->nodes[j];
@@ -884,6 +889,7 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t,
 			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
 				break;
 		}
+		ht->cds_lfht_rcu_read_unlock();
 		/* Update table size */
 		t->size = !i ? 1 : (1UL << i);
 		dbg_printf("init new size: %lu\n", t->size);
@@ -894,6 +900,11 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t,
 	t->resize_initiated = 0;
 }
 
+/*
+ * Holding RCU read lock to protect _cds_lfht_remove against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ */
 static
 void fini_table(struct cds_lfht *ht, struct rcu_table *t,
 		unsigned long first_order, unsigned long len_order)
@@ -917,6 +928,7 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
 		 */
 		t->size = 1UL << (i - 1);
 		/* Unlink */
+		ht->cds_lfht_rcu_read_lock();
 		for (j = 0; j < len; j++) {
 			struct cds_lfht_node *fini_node =
 				(struct cds_lfht_node *) &t->tbl[i]->nodes[j];
@@ -929,6 +941,7 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
 			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
 				break;
 		}
+		ht->cds_lfht_rcu_read_unlock();
 		ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
 		dbg_printf("fini new size: %lu\n", t->size);
 		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
@@ -945,7 +958,9 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
 			int flags,
 			void (*cds_lfht_call_rcu)(struct rcu_head *head,
 					void (*func)(struct rcu_head *head)),
-			void (*cds_lfht_synchronize_rcu)(void))
+			void (*cds_lfht_synchronize_rcu)(void),
+			void (*cds_lfht_rcu_read_lock)(void),
+			void (*cds_lfht_rcu_read_unlock)(void))
 {
 	struct cds_lfht *ht;
 	unsigned long order;
@@ -959,6 +974,8 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
 	ht->hash_seed = hash_seed;
 	ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
 	ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
+	ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
+	ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
 	ht->in_progress_resize = 0;
 	ht->percpu_count = alloc_per_cpu_items_count();
 	/* this mutex should not nest in read-side C.S. */
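
The point of threading rcu_read_lock/rcu_read_unlock through the API: init_table() and
fini_table() run from call_rcu worker-thread context, where other concurrently executing
callbacks may free and reallocate the very memory the resize loops are traversing.
Bracketing each link/unlink loop in an RCU read-side critical section defers that reclaim
until the bracket closes, which rules out the ABA scenario the new comments describe.
Below is a minimal sketch of the bracketing pattern, assuming the default urcu flavor
(rcu_read_lock()/rcu_read_unlock() from <urcu.h>); the worker function and its body are
hypothetical illustrations, not code from this patch:

#include <urcu.h>

/* Hypothetical worker sketching the pattern this patch adds to
 * init_table()/fini_table(); the calling thread is assumed to be
 * registered with rcu_register_thread(). */
static void resize_worker_sketch(void)
{
	rcu_read_lock();	/* reclaim by other call_rcu workers is deferred */
	/*
	 * Link/unlink bucket nodes here: a node pointer loaded inside
	 * this critical section cannot be freed and have its memory
	 * reused (ABA) until the matching rcu_read_unlock().
	 */
	rcu_read_unlock();	/* deferred frees may now proceed */
}

Passing the flavor's primitives into cds_lfht_new() as function pointers, as this patch
does for the read-side pair, keeps rculfhash.c flavor-agnostic: the table can take its
internal read-side critical sections without being compiled against one particular urcu
flavor, so long as all the callbacks handed to it come from the same flavor.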