rculfhash: wait for call_rcu quiescence before destroy
diff --git a/rculfhash.c b/rculfhash.c
index 11c0077977f327e5437a13d050f8eceb6e7a6a6b..a3223f37f046021cb6c3c0119e107e2e4dde4fd0 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -67,6 +67,7 @@ struct rcu_ht {
 	ht_compare_fct compare_fct;
 	unsigned long hash_seed;
 	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
+	unsigned int in_progress_resize;
 	void (*ht_call_rcu)(struct rcu_head *head,
 		      void (*func)(struct rcu_head *head));
 };
@@ -234,14 +235,16 @@ void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
 }
 
 static
-int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node,
-		int unique)
+struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
+			    struct rcu_ht_node *node, int unique)
 {
 	struct rcu_ht_node *iter_prev, *dummy, *iter, *next;
 	unsigned long hash;
 
-	if (!t->size)
-		return 0;
+	if (!t->size) {
+		assert(node->dummy);
+		return node;	/* Initial first add (head) */
+	}
 	hash = bit_reverse_ulong(node->reverse_hash);
 	for (;;) {
 		uint32_t chain_len = 0;
@@ -262,6 +265,12 @@ int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node,
 			next = rcu_dereference(clear_flag(iter)->next);
 			if (is_removed(next))
 				goto gc_node;
+			if (unique
+			    && !clear_flag(iter)->dummy
+			    && !ht->compare_fct(node->key, node->key_len,
+						clear_flag(iter)->key,
+						clear_flag(iter)->key_len))
+				return clear_flag(iter);
 			/* Only account for identical reverse hash once */
 			if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
 				check_resize(ht, t, ++chain_len);
@@ -287,7 +296,7 @@ gc_end:
 	/* Garbage collect logically removed nodes in the bucket */
 	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
 	_ht_gc_bucket(dummy, node);
-	return 0;
+	return node;
 }
 
 static
@@ -365,6 +374,7 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct,
 	ht->compare_fct = compare_fct;
 	ht->hash_seed = hash_seed;
 	ht->ht_call_rcu = ht_call_rcu;
+	ht->in_progress_resize = 0;
 	/* this mutex should not nest in read-side C.S. */
 	pthread_mutex_init(&ht->resize_mutex, NULL);
 	ht->t = calloc(1, sizeof(struct rcu_table)
@@ -417,7 +427,7 @@ void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
 	(void) _ht_add(ht, t, node, 0);
 }
 
-int ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
+struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
 {
 	struct rcu_table *t;
 	unsigned long hash;
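
With the ht_add_unique() hunk above and the return clear_flag(iter) path added in _ht_add(), uniqueness failures are no longer reported through an int error code: the function now returns the rcu_ht_node that is actually reachable in the table, i.e. the caller's node on success, or the pre-existing node when an equal key is found. A minimal caller sketch of the new convention follows; the my_entry wrapper, the my_insert_or_get() helper and the surrounding read-side locking are illustrative assumptions, not part of this patch.

#include <stdlib.h>
#include <urcu.h>		/* rcu_read_lock()/rcu_read_unlock() */
#include <urcu/compiler.h>	/* caa_container_of() */
#include "rculfhash.h"		/* assumed in-tree header declaring ht_add_unique() */

struct my_entry {
	struct rcu_ht_node node;	/* embedded hash table linkage */
	/* ... user payload ... */
};

/*
 * Insert "entry" unless an equal key already exists; in that case the
 * existing entry is returned and the caller keeps ownership of the node
 * it tried (and failed) to publish.
 */
static struct my_entry *my_insert_or_get(struct rcu_ht *ht, struct my_entry *entry)
{
	struct rcu_ht_node *ret;

	rcu_read_lock();	/* assumption: add runs inside a read-side C.S. */
	ret = ht_add_unique(ht, &entry->node);
	rcu_read_unlock();
	if (ret != &entry->node)
		free(entry);	/* duplicate key: our node was never linked in */
	return caa_container_of(ret, struct my_entry, node);
}
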
@@ -469,6 +479,9 @@ int ht_destroy(struct rcu_ht *ht)
 {
 	int ret;
 
+	/* Wait for in-flight resize operations to complete */
+	while (uatomic_read(&ht->in_progress_resize))
+		poll(NULL, 0, 100);	/* wait for 100ms */
 	ret = ht_delete_dummy(ht);
 	if (ret)
 		return ret;
@@ -568,6 +581,8 @@ void do_resize_cb(struct rcu_head *head)
 	_do_ht_resize(ht);
 	pthread_mutex_unlock(&ht->resize_mutex);
 	free(work);
+	cmm_smp_mb();	/* finish resize before decrement */
+	uatomic_dec(&ht->in_progress_resize);
 }
 
 static
@@ -578,6 +593,8 @@ void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
 
 	target_size = resize_target_update(t, growth);
 	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+		uatomic_inc(&ht->in_progress_resize);
+		cmm_smp_mb();	/* increment resize count before calling it */
 		work = malloc(sizeof(*work));
 		work->ht = ht;
 		ht->ht_call_rcu(&work->head, do_resize_cb);
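
Together, the last three hunks implement the quiescence guarantee named in the commit title: ht_resize_lazy() increments ht->in_progress_resize before handing do_resize_cb off through ht_call_rcu(), the callback decrements it once the resize has fully completed, and ht_destroy() spins until the counter drains before tearing the table down. A standalone sketch of the same pattern is shown below; it uses C11 atomics and hypothetical work_schedule()/work_complete()/work_quiesce() names in place of liburcu's uatomic/cmm primitives.

#include <poll.h>
#include <stdatomic.h>

/* Number of asynchronous callbacks still in flight (hypothetical example). */
static atomic_uint in_progress;

/* Called right before handing work to call_rcu() or a worker thread,
 * mirroring the uatomic_inc() + cmm_smp_mb() added to ht_resize_lazy(). */
static void work_schedule(void)
{
	atomic_fetch_add(&in_progress, 1);	/* seq_cst: counted before the hand-off */
	/* ... enqueue the callback here ... */
}

/* Called at the end of the callback, mirroring the cmm_smp_mb() +
 * uatomic_dec() added at the end of do_resize_cb(). */
static void work_complete(void)
{
	atomic_fetch_sub(&in_progress, 1);	/* seq_cst: work finished before decrement */
}

/* Called on the destroy path before freeing shared state, mirroring
 * the wait loop added to ht_destroy(). */
static void work_quiesce(void)
{
	while (atomic_load(&in_progress))
		poll(NULL, 0, 100);	/* re-check every 100ms */
}
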