summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1: 51b03c6)
We need to hold the RCU read-side lock in resize to protect against ABA
caused by re-use of nodes after going through the memory allocator
through a call_rcu performed on another CPU.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head));
void (*cds_lfht_synchronize_rcu)(void);
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head));
void (*cds_lfht_synchronize_rcu)(void);
+ void (*cds_lfht_rcu_read_lock)(void);
+ void (*cds_lfht_rcu_read_unlock)(void);
unsigned long count; /* global approximate item count */
struct ht_items_count *percpu_count; /* per-cpu item count */
};
unsigned long count; /* global approximate item count */
struct ht_items_count *percpu_count; /* per-cpu item count */
};
+/*
+ * Holding RCU read lock to protect _cds_lfht_add against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ */
static
void init_table(struct cds_lfht *ht, struct rcu_table *t,
unsigned long first_order, unsigned long len_order)
static
void init_table(struct cds_lfht *ht, struct rcu_table *t,
unsigned long first_order, unsigned long len_order)
dbg_printf("init order %lu len: %lu\n", i, len);
t->tbl[i] = calloc(1, sizeof(struct rcu_level)
+ (len * sizeof(struct _cds_lfht_node)));
dbg_printf("init order %lu len: %lu\n", i, len);
t->tbl[i] = calloc(1, sizeof(struct rcu_level)
+ (len * sizeof(struct _cds_lfht_node)));
+ ht->cds_lfht_rcu_read_lock();
for (j = 0; j < len; j++) {
struct cds_lfht_node *new_node =
(struct cds_lfht_node *) &t->tbl[i]->nodes[j];
for (j = 0; j < len; j++) {
struct cds_lfht_node *new_node =
(struct cds_lfht_node *) &t->tbl[i]->nodes[j];
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
+ ht->cds_lfht_rcu_read_unlock();
/* Update table size */
t->size = !i ? 1 : (1UL << i);
dbg_printf("init new size: %lu\n", t->size);
/* Update table size */
t->size = !i ? 1 : (1UL << i);
dbg_printf("init new size: %lu\n", t->size);
t->resize_initiated = 0;
}
t->resize_initiated = 0;
}
+/*
+ * Holding RCU read lock to protect _cds_lfht_remove against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ */
static
void fini_table(struct cds_lfht *ht, struct rcu_table *t,
unsigned long first_order, unsigned long len_order)
static
void fini_table(struct cds_lfht *ht, struct rcu_table *t,
unsigned long first_order, unsigned long len_order)
*/
t->size = 1UL << (i - 1);
/* Unlink */
*/
t->size = 1UL << (i - 1);
/* Unlink */
+ ht->cds_lfht_rcu_read_lock();
for (j = 0; j < len; j++) {
struct cds_lfht_node *fini_node =
(struct cds_lfht_node *) &t->tbl[i]->nodes[j];
for (j = 0; j < len; j++) {
struct cds_lfht_node *fini_node =
(struct cds_lfht_node *) &t->tbl[i]->nodes[j];
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
+ ht->cds_lfht_rcu_read_unlock();
ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
dbg_printf("fini new size: %lu\n", t->size);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
dbg_printf("fini new size: %lu\n", t->size);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
int flags,
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head)),
int flags,
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head)),
- void (*cds_lfht_synchronize_rcu)(void))
+ void (*cds_lfht_synchronize_rcu)(void),
+ void (*cds_lfht_rcu_read_lock)(void),
+ void (*cds_lfht_rcu_read_unlock)(void))
{
struct cds_lfht *ht;
unsigned long order;
{
struct cds_lfht *ht;
unsigned long order;
ht->hash_seed = hash_seed;
ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
ht->hash_seed = hash_seed;
ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
+ ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
+ ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
ht->in_progress_resize = 0;
ht->percpu_count = alloc_per_cpu_items_count();
/* this mutex should not nest in read-side C.S. */
ht->in_progress_resize = 0;
ht->percpu_count = alloc_per_cpu_items_count();
/* this mutex should not nest in read-side C.S. */
test_ht = cds_lfht_new(test_hash, test_compare, 0x42UL,
init_hash_size,
opt_auto_resize ? CDS_LFHT_AUTO_RESIZE : 0,
test_ht = cds_lfht_new(test_hash, test_compare, 0x42UL,
init_hash_size,
opt_auto_resize ? CDS_LFHT_AUTO_RESIZE : 0,
- call_rcu, synchronize_rcu);
+ call_rcu, synchronize_rcu, rcu_read_lock,
+ rcu_read_unlock);
ret = populate_hash();
assert(!ret);
err = create_all_cpu_call_rcu_data(0);
ret = populate_hash();
assert(!ret);
err = create_all_cpu_call_rcu_data(0);
int flags,
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head)),
int flags,
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head)),
- void (*cds_lfht_synchronize_rcu)(void));
+ void (*cds_lfht_synchronize_rcu)(void),
+ void (*cds_lfht_rcu_read_lock)(void),
+ void (*cds_lfht_rcu_read_unlock)(void));
/*
* cds_lfht_destroy - destroy a hash table.
/*
* cds_lfht_destroy - destroy a hash table.