cleanup: use an enum for the error states of nr_cpus_mask
[userspace-rcu.git] / src / rculfhash.c
diff --git a/src/rculfhash.c b/src/rculfhash.c
index 401a76a4ae06f328682549239decae8c7f3fa980..02c7f0f6dc3564fc4f10348a24ea89deee6a8a3d 100644
--- a/src/rculfhash.c
+++ b/src/rculfhash.c
@@ -259,7 +259,6 @@
 #include <urcu/uatomic.h>
 #include <urcu/compiler.h>
 #include <urcu/rculfhash.h>
-#include <urcu/static/urcu-signal-nr.h>
 #include <stdio.h>
 #include <pthread.h>
 #include <signal.h>
@@ -348,6 +347,11 @@ struct partition_resize_work {
 			unsigned long start, unsigned long len);
 };
 
+enum nr_cpus_mask_state {
+	NR_CPUS_MASK_INIT_FAILED	= -2,
+	NR_CPUS_MASK_UNINITIALIZED	= -1,
+};
+
 static struct urcu_workqueue *cds_lfht_workqueue;
 
 /*
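Annotation (not part of the patch): the enum replaces the bare -1/-2 sentinels that previously encoded "uninitialized" and "initialization failed". Below is a minimal standalone sketch of how the named states read at a call site; it reuses the enumerator names from the patch but is otherwise hypothetical code, not the library source.

/* Standalone sketch: named sentinels instead of magic -1/-2. */
#include <stdio.h>

enum nr_cpus_mask_state {
	NR_CPUS_MASK_INIT_FAILED	= -2,
	NR_CPUS_MASK_UNINITIALIZED	= -1,
};

static long nr_cpus_mask = NR_CPUS_MASK_UNINITIALIZED;

int main(void)
{
	if (nr_cpus_mask == NR_CPUS_MASK_UNINITIALIZED)
		printf("nr_cpus_mask not initialized yet\n");
	else if (nr_cpus_mask == NR_CPUS_MASK_INIT_FAILED)
		printf("cpu detection failed, falling back to defaults\n");
	else
		printf("nr_cpus_mask = %ld\n", nr_cpus_mask);
	return 0;
}

Because the enumerators keep the same numeric values, the existing "nr_cpus_mask < 0" checks later in the file continue to work unchanged.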
@@ -609,9 +613,7 @@ static void mutex_lock(pthread_mutex_t *mutex)
 		if (ret != EBUSY && ret != EINTR)
 			urcu_die(ret);
 		if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
-			cmm_smp_mb();
-			_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
-			cmm_smp_mb();
+			uatomic_store(&URCU_TLS(rcu_reader).need_mb, 0, CMM_SEQ_CST);
 		}
 		(void) poll(NULL, 0, 10);
 	}
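Annotation: the two explicit cmm_smp_mb() calls around a plain store are folded into one uatomic_store with CMM_SEQ_CST. A rough equivalent is sketched below with standard C11 atomics rather than the urcu uatomic API; strictly speaking the fence-based form is stronger than a single seq_cst store, so this only illustrates the intent of the change, and all names are hypothetical.

/* Sketch with C11 atomics (not the urcu API). */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int need_mb;

static void ack_membarrier_old(void)
{
	/* old style: explicit full fences around a relaxed store */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_store_explicit(&need_mb, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
}

static void ack_membarrier_new(void)
{
	/* new style: a single sequentially consistent store on the flag */
	atomic_store_explicit(&need_mb, 0, memory_order_seq_cst);
}

int main(void)
{
	atomic_store_explicit(&need_mb, 1, memory_order_relaxed);
	ack_membarrier_old();
	atomic_store_explicit(&need_mb, 1, memory_order_relaxed);
	ack_membarrier_new();
	printf("need_mb = %d\n",
		atomic_load_explicit(&need_mb, memory_order_relaxed));
	return 0;
}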
@@ -627,7 +629,7 @@ static void mutex_unlock(pthread_mutex_t *mutex)
 		urcu_die(ret);
 }
 
-static long nr_cpus_mask = -1;
+static long nr_cpus_mask = NR_CPUS_MASK_UNINITIALIZED;
 static long split_count_mask = -1;
 static int split_count_order = -1;
@@ -637,7 +639,7 @@ static void ht_init_nr_cpus_mask(void)
 
 	maxcpus = get_possible_cpus_array_len();
 	if (maxcpus <= 0) {
-		nr_cpus_mask = -2;
+		nr_cpus_mask = NR_CPUS_MASK_INIT_FAILED;
 		return;
 	}
 	/*
@@ -651,7 +653,7 @@ static void ht_init_nr_cpus_mask(void)
 static
 void alloc_split_items_count(struct cds_lfht *ht)
 {
-	if (nr_cpus_mask == -1) {
+	if (nr_cpus_mask == NR_CPUS_MASK_UNINITIALIZED) {
 		ht_init_nr_cpus_mask();
 		if (nr_cpus_mask < 0)
 			split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
@@ -869,8 +871,10 @@ unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
 	old1 = uatomic_read(ptr);
 	do {
 		old2 = old1;
-		if (old2 >= v)
+		if (old2 >= v) {
+			cmm_smp_mb();
 			return old2;
+		}
 	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
 	return old2;
 }
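Annotation: the added cmm_smp_mb() makes the early-return path provide full-barrier semantics even when no cmpxchg is performed, so callers can rely on the same ordering on both paths. A standalone C11 sketch of the same idea, with hypothetical names:

/* Monotonically raise *ptr to at least v; the explicit fence keeps
 * full-barrier semantics on the early-return path. */
#include <stdatomic.h>
#include <stdio.h>

static unsigned long xchg_monotonic_increase(_Atomic unsigned long *ptr,
		unsigned long v)
{
	unsigned long old = atomic_load_explicit(ptr, memory_order_relaxed);

	for (;;) {
		if (old >= v) {
			/* no update needed: still provide ordering */
			atomic_thread_fence(memory_order_seq_cst);
			return old;
		}
		/* a successful seq_cst CAS already provides the ordering */
		if (atomic_compare_exchange_weak_explicit(ptr, &old, v,
				memory_order_seq_cst, memory_order_relaxed))
			return old;
		/* old was refreshed by the failed CAS; retry */
	}
}

int main(void)
{
	_Atomic unsigned long target = 4;

	printf("%lu\n", xchg_monotonic_increase(&target, 8)); /* raises to 8, returns 4 */
	printf("%lu\n", xchg_monotonic_increase(&target, 2)); /* no change, returns 8 */
	printf("%lu\n", atomic_load(&target));
	return 0;
}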
@@ -1154,6 +1158,7 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 		struct cds_lfht_node *node)
 {
 	struct cds_lfht_node *bucket, *next;
+	uintptr_t *node_next;
 
 	if (!node)	/* Return -ENOENT if asked to delete NULL node */
 		return -ENOENT;
@@ -1176,15 +1181,18 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 	/*
 	 * The del operation semantic guarantees a full memory barrier
 	 * before the uatomic_or atomic commit of the deletion flag.
-	 */
-	cmm_smp_mb__before_uatomic_or();
-	/*
+	 *
 	 * We set the REMOVED_FLAG unconditionally. Note that there may
 	 * be more than one concurrent thread setting this flag.
 	 * Knowing which wins the race will be known after the garbage
 	 * collection phase, stay tuned!
+	 *
+	 * NOTE: The node_next variable is present to avoid breaking
+	 * strict-aliasing rules.
 	 */
-	uatomic_or(&node->next, REMOVED_FLAG);
+	node_next = (uintptr_t*)&node->next;
+	uatomic_or_mo(node_next, REMOVED_FLAG, CMM_RELEASE);
+
 	/* We performed the (logical) deletion. */
 
 	/*
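Annotation: the new node_next temporary exists only so the atomic OR is applied through a uintptr_t lvalue instead of type-punning the struct pointer in place, and the commit of REMOVED_FLAG now carries CMM_RELEASE ordering. The sketch below shows the same tag-bit-with-release pattern in plain C11; the node layout and names are hypothetical, not the actual cds_lfht_node.

/* Set a low tag bit on a pointer-sized word with release ordering.
 * Keeping the word as an atomic uintptr_t avoids aliasing a struct
 * pointer through an integer lvalue. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define REMOVED_FLAG	((uintptr_t) 1)

struct node {
	_Atomic uintptr_t next;	/* pointer bits + flag bits */
};

static void mark_removed(struct node *node)
{
	/* commit the logical deletion flag; release orders prior writes */
	atomic_fetch_or_explicit(&node->next, REMOVED_FLAG,
			memory_order_release);
}

static int is_removed(struct node *node)
{
	return (atomic_load_explicit(&node->next, memory_order_relaxed)
			& REMOVED_FLAG) != 0;
}

int main(void)
{
	struct node n = { .next = 0 };

	mark_removed(&n);
	printf("removed = %d\n", is_removed(&n));
	return 0;
}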
@@ -1209,7 +1217,7 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 	 * was already set).
 	 */
 	if (!is_removal_owner(uatomic_xchg(&node->next,
-			flag_removal_owner(node->next))))
+			flag_removal_owner(uatomic_load(&node->next, CMM_RELAXED)))))
 		return 0;
 	else
 		return -ENOENT;
@@ -1238,7 +1246,7 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 	unsigned long thread, nr_threads;
 	sigset_t newmask, oldmask;
 
-	urcu_posix_assert(nr_cpus_mask != -1);
+	urcu_posix_assert(nr_cpus_mask != NR_CPUS_MASK_UNINITIALIZED);
 	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
 		goto fallback;
@@ -1375,9 +1383,10 @@ void init_table(struct cds_lfht *ht,
 
 		/*
 		 * Update table size.
+		 *
+		 * Populate data before RCU size.
 		 */
-		cmm_smp_wmb();	/* populate data before RCU size */
-		CMM_STORE_SHARED(ht->size, 1UL << i);
+		uatomic_store(&ht->size, 1UL << i, CMM_RELEASE);
 
 		dbg_printf("init new size: %lu\n", 1UL << i);
 		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
@@ -1422,12 +1431,18 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
 	for (j = size + start; j < size + start + len; j++) {
 		struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
 		struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);
+		uintptr_t *fini_bucket_next;
 
 		urcu_posix_assert(j >= size && j < (size << 1));
 		dbg_printf("remove entry: order %lu index %lu hash %lu\n",
 			   i, j, j);
-		/* Set the REMOVED_FLAG to freeze the ->next for gc */
-		uatomic_or(&fini_bucket->next, REMOVED_FLAG);
+		/* Set the REMOVED_FLAG to freeze the ->next for gc.
+		 *
+		 * NOTE: The fini_bucket_next variable is present to
+		 * avoid breaking strict-aliasing rules.
+		 */
+		fini_bucket_next = (uintptr_t*)&fini_bucket->next;
+		uatomic_or(fini_bucket_next, REMOVED_FLAG);
 		_cds_lfht_gc_bucket(parent_bucket, fini_bucket);
 	}
 	ht->flavor->read_unlock();
@@ -1653,7 +1668,14 @@ void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
 
 	reverse_hash = bit_reverse_ulong(hash);
 
-	size = rcu_dereference(ht->size);
+	/*
+	 * Use load acquire instead of rcu_dereference because there is no
+	 * dependency between the table size and the dereference of the bucket
+	 * content.
+	 *
+	 * This acquire is paired with the store release in init_table().
+	 */
+	size = uatomic_load(&ht->size, CMM_ACQUIRE);
 	bucket = lookup_bucket(ht, size, hash);
 	/* We can always skip the bucket node initially */
 	node = rcu_dereference(bucket->next);
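Annotation: this is the reader side of the release/acquire pair. The init_table() hunk above publishes the new size with a release store, and lookups now acquire-load it instead of relying on rcu_dereference(), since there is no address dependency from the size value to the bucket contents. A self-contained C11 sketch of that pattern, with hypothetical names:

/* Release/acquire pairing on a table-size word: the writer populates
 * bucket data and then publishes the size; a reader that observes the
 * new size is guaranteed to see the populated data. */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_BUCKETS	8

static int buckets[MAX_BUCKETS];
static _Atomic unsigned long table_size;

static void grow_table(unsigned long new_size)
{
	for (unsigned long i = atomic_load_explicit(&table_size,
			memory_order_relaxed); i < new_size; i++)
		buckets[i] = (int) i;	/* populate data... */

	/* ...before publishing the new size (pairs with the acquire below) */
	atomic_store_explicit(&table_size, new_size, memory_order_release);
}

static int lookup(unsigned long idx)
{
	unsigned long size = atomic_load_explicit(&table_size,
			memory_order_acquire);

	return idx < size ? buckets[idx] : -1;
}

int main(void)
{
	grow_table(4);
	printf("bucket 2 = %d\n", lookup(2));
	printf("bucket 6 = %d\n", lookup(6));	/* beyond the published size */
	return 0;
}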
@@ -1712,7 +1734,7 @@ void cds_lfht_next_duplicate(struct cds_lfht *ht __attribute__((unused)),
 		}
 		node = clear_flag(next);
 	}
-	urcu_posix_assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+	urcu_posix_assert(!node || !is_bucket(uatomic_load(&node->next, CMM_RELAXED)));
 	iter->node = node;
 	iter->next = next;
 }
@@ -1736,7 +1758,7 @@ void cds_lfht_next(struct cds_lfht *ht __attribute__((unused)),
 		}
 		node = clear_flag(next);
 	}
-	urcu_posix_assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+	urcu_posix_assert(!node || !is_bucket(uatomic_load(&node->next, CMM_RELAXED)));
 	iter->node = node;
 	iter->next = next;
 }
@@ -1748,7 +1770,7 @@ void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 	 * Get next after first bucket node. The first bucket node is the
 	 * first node of the linked list.
 	 */
-	iter->next = bucket_at(ht, 0)->next;
+	iter->next = uatomic_load(&bucket_at(ht, 0)->next, CMM_CONSUME);
 	cds_lfht_next(ht, iter);
 }
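Annotation: the plain pointer read becomes a CMM_CONSUME load, keeping the address-dependency ordering that rcu_dereference() used to provide for the subsequent dereference. A minimal C11 illustration with hypothetical names; most compilers currently implement memory_order_consume as memory_order_acquire:

/* Consume load of a published pointer followed by a dereference. */
#include <stdatomic.h>
#include <stdio.h>

struct item {
	int value;
};

static struct item first_item = { .value = 42 };
static struct item *_Atomic head = &first_item;

int main(void)
{
	/* load the published pointer, then dereference through it */
	struct item *it = atomic_load_explicit(&head, memory_order_consume);

	printf("value = %d\n", it->value);
	return 0;
}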
@@ -1758,7 +1780,7 @@ void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
 	unsigned long size;
 
 	node->reverse_hash = bit_reverse_ulong(hash);
-	size = rcu_dereference(ht->size);
+	size = uatomic_load(&ht->size, CMM_ACQUIRE);
 	_cds_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
 	ht_count_add(ht, size, hash);
 }
@@ -1773,7 +1795,7 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
 	struct cds_lfht_iter iter;
 
 	node->reverse_hash = bit_reverse_ulong(hash);
-	size = rcu_dereference(ht->size);
+	size = uatomic_load(&ht->size, CMM_ACQUIRE);
 	_cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
 	if (iter.node == node)
 		ht_count_add(ht, size, hash);
@@ -1790,7 +1812,7 @@ struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
 	struct cds_lfht_iter iter;
 
 	node->reverse_hash = bit_reverse_ulong(hash);
-	size = rcu_dereference(ht->size);
+	size = uatomic_load(&ht->size, CMM_ACQUIRE);
 	for (;;) {
 		_cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
 		if (iter.node == node) {
@@ -1819,7 +1841,7 @@ int cds_lfht_replace(struct cds_lfht *ht,
 		return -EINVAL;
 	if (caa_unlikely(!match(old_iter->node, key)))
 		return -EINVAL;
-	size = rcu_dereference(ht->size);
+	size = uatomic_load(&ht->size, CMM_ACQUIRE);
 	return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
 			new_node);
 }
@@ -1829,7 +1851,7 @@ int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
 	unsigned long size;
 	int ret;
 
-	size = rcu_dereference(ht->size);
+	size = uatomic_load(&ht->size, CMM_ACQUIRE);
 	ret = _cds_lfht_del(ht, size, node);
 	if (!ret) {
 		unsigned long hash;
@@ -1943,7 +1965,7 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 	if (!cds_lfht_is_empty(ht))
 		return -EPERM;
 	/* Cancel ongoing resize operations. */
-	_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+	uatomic_store(&ht->in_progress_destroy, 1, CMM_RELAXED);
 	if (attr) {
 		*attr = ht->caller_resize_attr;
 		ht->caller_resize_attr = NULL;
@@ -2063,19 +2085,22 @@ void _do_cds_lfht_resize(struct cds_lfht *ht)
 	 * Resize table, re-do if the target size has changed under us.
 	 */
 	do {
-		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+		if (uatomic_load(&ht->in_progress_destroy, CMM_RELAXED))
 			break;
-		ht->resize_initiated = 1;
+
+		uatomic_store(&ht->resize_initiated, 1, CMM_RELAXED);
+
 		old_size = ht->size;
-		new_size = CMM_LOAD_SHARED(ht->resize_target);
+		new_size = uatomic_load(&ht->resize_target, CMM_RELAXED);
 		if (old_size < new_size)
 			_do_cds_lfht_grow(ht, old_size, new_size);
 		else if (old_size > new_size)
 			_do_cds_lfht_shrink(ht, old_size, new_size);
-		ht->resize_initiated = 0;
+
+		uatomic_store(&ht->resize_initiated, 0, CMM_RELAXED);
 		/* write resize_initiated before read resize_target */
 		cmm_smp_mb();
-	} while (ht->size != CMM_LOAD_SHARED(ht->resize_target));
+	} while (ht->size != uatomic_load(&ht->resize_target, CMM_RELAXED));
 }
 
 static
@@ -2096,7 +2121,12 @@ void resize_target_update_count(struct cds_lfht *ht,
 void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
 {
 	resize_target_update_count(ht, new_size);
-	CMM_STORE_SHARED(ht->resize_initiated, 1);
+
+	/*
+	 * Set flags as early as possible even in contention case.
+	 */
+	uatomic_store(&ht->resize_initiated, 1, CMM_RELAXED);
+
 	mutex_lock(&ht->resize_mutex);
 	_do_cds_lfht_resize(ht);
 	mutex_unlock(&ht->resize_mutex);
@@ -2122,10 +2152,12 @@ void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
 {
 	struct resize_work *work;
 
-	/* Store resize_target before read resize_initiated */
-	cmm_smp_mb();
-	if (!CMM_LOAD_SHARED(ht->resize_initiated)) {
-		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+	/*
+	 * Store to resize_target is ordered before read of resize_initiated,
+	 * as guaranteed by either cmpxchg or _uatomic_xchg_monotonic_increase.
+	 */
+	if (!uatomic_load(&ht->resize_initiated, CMM_RELAXED)) {
+		if (uatomic_load(&ht->in_progress_destroy, CMM_RELAXED)) {
 			return;
 		}
 		work = malloc(sizeof(*work));
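Annotation: the standalone cmm_smp_mb() disappears because the preceding update of resize_target goes through cmpxchg or _uatomic_xchg_monotonic_increase, which already imply full barriers, so the relaxed load of resize_initiated remains ordered after the target update. A portable C11 sketch of that reasoning, with an explicit fence standing in for the barrier implied by the urcu primitives (names hypothetical):

/* Lazy launch: raise the target, then check the "initiated" flag. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long resize_target;
static _Atomic int resize_initiated;

static void lazy_launch(unsigned long new_size)
{
	unsigned long old = atomic_load_explicit(&resize_target,
			memory_order_relaxed);

	/* monotonically raise the target (stand-in for the urcu RMW) */
	while (old < new_size &&
	       !atomic_compare_exchange_weak_explicit(&resize_target, &old,
			new_size, memory_order_seq_cst, memory_order_relaxed))
		;

	/* the urcu RMW primitives imply this full barrier themselves */
	atomic_thread_fence(memory_order_seq_cst);

	/* the target update is now ordered before this flag read */
	if (!atomic_load_explicit(&resize_initiated, memory_order_relaxed)) {
		atomic_store_explicit(&resize_initiated, 1,
				memory_order_relaxed);
		printf("queue resize work, target %lu\n", new_size);
	}
}

int main(void)
{
	lazy_launch(8);
	lazy_launch(4);	/* target already >= 4, flag already set */
	return 0;
}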
@@ -2136,7 +2168,7 @@ void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
 		work->ht = ht;
 		urcu_workqueue_queue_work(cds_lfht_workqueue,
 			&work->work, do_resize_cb);
-		CMM_STORE_SHARED(ht->resize_initiated, 1);
+		uatomic_store(&ht->resize_initiated, 1, CMM_RELAXED);
 	}
 }