X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=rculfhash.c;h=8dd5a7327ad81be4da1b3dddace433b4fd8aadc4;hb=e8de508ea8e9baf08e5e5ec5b4d04cf947dfd529;hp=f95ef5dc083aa4e22618512708beb37cee22e1e3;hpb=5f51139190536f948f9571fdf3c97cf0198356d0;p=urcu.git diff --git a/rculfhash.c b/rculfhash.c index f95ef5d..8dd5a73 100644 --- a/rculfhash.c +++ b/rculfhash.c @@ -752,10 +752,8 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, iter = rcu_dereference(iter_prev->p.next); assert(iter_prev->p.reverse_hash <= node->p.reverse_hash); for (;;) { - /* TODO: check if removed */ if (unlikely(!clear_flag(iter))) goto insert; - /* TODO: check if removed */ if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) goto insert; next = rcu_dereference(clear_flag(iter)->p.next); @@ -883,6 +881,11 @@ void init_table_hash(struct cds_lfht *ht, unsigned long i, } } +/* + * Holding RCU read lock to protect _cds_lfht_add against memory + * reclaim that could be performed by other call_rcu worker threads (ABA + * problem). + */ static void init_table_link(struct cds_lfht *ht, unsigned long i, unsigned long len) { @@ -905,11 +908,6 @@ void init_table_link(struct cds_lfht *ht, unsigned long i, unsigned long len) ht->cds_lfht_rcu_thread_offline(); } -/* - * Holding RCU read lock to protect _cds_lfht_add against memory - * reclaim that could be performed by other call_rcu worker threads (ABA - * problem). - */ static void init_table(struct cds_lfht *ht, unsigned long first_order, unsigned long len_order) @@ -948,6 +946,26 @@ void init_table(struct cds_lfht *ht, } } +/* + * Holding RCU read lock to protect _cds_lfht_remove against memory + * reclaim that could be performed by other call_rcu worker threads (ABA + * problem). + * For a single level, we logically remove and garbage collect each node. + * + * As a design choice, we perform logical removal and garbage collection on a + * node-per-node basis to simplify this algorithm. 
We also assume keeping good
+ * cache locality of the operation would outweigh the possible performance gain
+ * that could be achieved by batching garbage collection for multiple levels.
+ * However, this would have to be justified by benchmarks.
+ *
+ * Concurrent removal and add operations are helping us perform garbage
+ * collection of logically removed nodes. We guarantee that all logically
+ * removed nodes have been garbage-collected (unlinked) before call_rcu is
+ * invoked to free a whole level of dummy nodes (after a grace period).
+ *
+ * Logical removal and garbage collection can therefore be done in batch or on a
+ * node-per-node basis, as long as the guarantee above holds.
+ */
 static
 void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
 {
@@ -972,11 +990,6 @@ void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
 	ht->cds_lfht_rcu_thread_offline();
 }
 
-/*
- * Holding RCU read lock to protect _cds_lfht_remove against memory
- * reclaim that could be performed by other call_rcu worker threads (ABA
- * problem).
- */
 static
 void fini_table(struct cds_lfht *ht, unsigned long first_order,
 		unsigned long len_order)