By adding a small slowpath overhead (added synchronize_rcu call in the
last iteration of the resize), we can reduce the amount of wasted memory
for memory allocators that allocate power of two memory areas. This is
achieved by removing the call_rcu head from struct rcu_level.
[ Edit by Mathieu Desnoyers:
- add a comment about the need to manually update the allocation size
  if fields are added to struct rcu_level.
- create a more explanatory title and changelog. ]
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct rcu_level {
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct rcu_level {
+ /* Note: manually update allocation length when adding a field */
struct _cds_lfht_node nodes[0];
};
struct _cds_lfht_node nodes[0];
};
-static
-void cds_lfht_free_level(struct rcu_head *head)
-{
- struct rcu_level *l =
- caa_container_of(head, struct rcu_level, head);
- poison_free(l);
-}
-
/*
* Remove all logically deleted nodes from a bucket up to a certain node key.
*/
/*
* Remove all logically deleted nodes from a bucket up to a certain node key.
*/
if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
break;
if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
break;
- ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
- + (len * sizeof(struct _cds_lfht_node)));
+ ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
unsigned long first_order, unsigned long len_order)
{
long i, end_order;
unsigned long first_order, unsigned long len_order)
{
long i, end_order;
+ void *free_by_rcu = NULL;
dbg_printf("fini table: first_order %lu end_order %lu\n",
first_order, first_order + len_order);
dbg_printf("fini table: first_order %lu end_order %lu\n",
first_order, first_order + len_order);
* return a logically removed node as insert position.
*/
ht->cds_lfht_synchronize_rcu();
* return a logically removed node as insert position.
*/
ht->cds_lfht_synchronize_rcu();
+ if (free_by_rcu)
+ free(free_by_rcu);
/*
* Set "removed" flag in dummy nodes about to be removed.
/*
* Set "removed" flag in dummy nodes about to be removed.
*/
remove_table(ht, i, len);
*/
remove_table(ht, i, len);
- ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level);
+ free_by_rcu = ht->t.tbl[i];
dbg_printf("fini new size: %lu\n", 1UL << i);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
dbg_printf("fini new size: %lu\n", 1UL << i);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
+
+ if (free_by_rcu) {
+ ht->cds_lfht_synchronize_rcu();
+ free(free_by_rcu);
+ }
}
struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
}
struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,