#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
-#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include "compat-getcpu.h"
+#include <urcu/assert.h>
#include <urcu/pointer.h>
#include <urcu/call-rcu.h>
#include <urcu/flavor.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
+#include <urcu/static/urcu-signal-nr.h>
#include <rculfhash-internal.h>
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include "workqueue.h"
#include "urcu-die.h"
+#include "urcu-utils.h"
/*
* Split-counters lazily update the global counter each 1024
iter->lfht = ht;
}
-#define cds_lfht_iter_debug_assert(...) assert(__VA_ARGS__)
+#define cds_lfht_iter_debug_assert(...) urcu_posix_assert(__VA_ARGS__)
#else
static
-void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht __attribute__((unused)),
+ struct cds_lfht_iter *iter __attribute__((unused)))
{
}
* Returns 0 if no bit is set, else returns the position of the most
* significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
*/
-#if defined(__i386) || defined(__x86_64)
+#if defined(URCU_ARCH_X86)
static inline
unsigned int fls_u32(uint32_t x)
{
#define HAS_FLS_U32
#endif
-#if defined(__x86_64)
+#if defined(URCU_ARCH_AMD64)
static inline
unsigned int fls_u64(uint64_t x)
{
* Return the minimum order for which x <= (1UL << order).
* Return -1 if x is 0.
*/
+static
int cds_lfht_get_count_order_u32(uint32_t x)
{
if (!x)
cds_lfht_get_count_order_ulong(split_count_mask + 1);
}
- assert(split_count_mask >= 0);
+ urcu_posix_assert(split_count_mask >= 0);
if (ht->flags & CDS_LFHT_ACCOUNTING) {
ht->split_count = calloc(split_count_mask + 1,
sizeof(struct ht_items_count));
- assert(ht->split_count);
+ urcu_posix_assert(ht->split_count);
} else {
ht->split_count = NULL;
}
{
int cpu;
- assert(split_count_mask >= 0);
+ urcu_posix_assert(split_count_mask >= 0);
cpu = urcu_sched_getcpu();
if (caa_unlikely(cpu < 0))
return hash & split_count_mask;
static
void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
- unsigned long split_count;
+ unsigned long split_count, count;
int index;
- long count;
if (caa_unlikely(!ht->split_count))
return;
if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
return;
- dbg_printf("add set global %ld\n", count);
+ dbg_printf("add set global %lu\n", count);
cds_lfht_resize_lazy_count(ht, size,
count >> (CHAIN_LEN_TARGET - 1));
}
static
void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
- unsigned long split_count;
+ unsigned long split_count, count;
int index;
- long count;
if (caa_unlikely(!ht->split_count))
return;
}
static
-int is_removed(struct cds_lfht_node *node)
+int is_removed(const struct cds_lfht_node *node)
{
return ((unsigned long) node) & REMOVED_FLAG;
}
return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
}
+static
+struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
+{
+ return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
+}
+
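The new flag_removed() helper mirrors the existing flag_bucket()/flag_removal_owner() helpers: node state is encoded in the low-order bits of the next pointer, which are available because hash table nodes are sufficiently aligned. A self-contained sketch of that tagging scheme (the struct, flag constant, and helper names below are illustrative, not the file's actual definitions):

	#include <assert.h>
	#include <stdlib.h>

	#define SKETCH_REMOVED_FLAG	(1UL << 0)	/* low bit unused thanks to alignment */

	struct sketch_node {
		struct sketch_node *next;
	};

	static struct sketch_node *sketch_flag_removed(struct sketch_node *node)
	{
		return (struct sketch_node *) (((unsigned long) node) | SKETCH_REMOVED_FLAG);
	}

	static int sketch_is_removed(const struct sketch_node *node)
	{
		return ((unsigned long) node) & SKETCH_REMOVED_FLAG;
	}

	static struct sketch_node *sketch_clear_flag(struct sketch_node *node)
	{
		return (struct sketch_node *) (((unsigned long) node) & ~SKETCH_REMOVED_FLAG);
	}

	int main(void)
	{
		struct sketch_node *n = malloc(sizeof(*n));
		struct sketch_node *tagged = sketch_flag_removed(n);

		assert(sketch_is_removed(tagged));	/* flag is visible... */
		assert(sketch_clear_flag(tagged) == n);	/* ...and reversible */
		free(n);
		return 0;
	}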
static
struct cds_lfht_node *flag_removal_owner(struct cds_lfht_node *node)
{
struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
unsigned long hash)
{
- assert(size > 0);
+ urcu_posix_assert(size > 0);
return bucket_at(ht, hash & (size - 1));
}
{
struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
- assert(!is_bucket(bucket));
- assert(!is_removed(bucket));
- assert(!is_removal_owner(bucket));
- assert(!is_bucket(node));
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
+ urcu_posix_assert(!is_bucket(bucket));
+ urcu_posix_assert(!is_removed(bucket));
+ urcu_posix_assert(!is_removal_owner(bucket));
+ urcu_posix_assert(!is_bucket(node));
+ urcu_posix_assert(!is_removed(node));
+ urcu_posix_assert(!is_removal_owner(node));
for (;;) {
iter_prev = bucket;
/* We can always skip the bucket node initially */
iter = rcu_dereference(iter_prev->next);
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
- assert(iter_prev->reverse_hash <= node->reverse_hash);
+ urcu_posix_assert(!is_removed(iter));
+ urcu_posix_assert(!is_removal_owner(iter));
+ urcu_posix_assert(iter_prev->reverse_hash <= node->reverse_hash);
/*
* We should never be called with bucket (start of chain)
* and logically removed node (end of path compression
* marker) being the actual same node. This would be a
* bug in the algorithm implementation.
*/
- assert(bucket != node);
+ urcu_posix_assert(bucket != node);
for (;;) {
if (caa_unlikely(is_end(iter)))
return;
iter_prev = clear_flag(iter);
iter = next;
}
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
+ urcu_posix_assert(!is_removed(iter));
+ urcu_posix_assert(!is_removal_owner(iter));
if (is_bucket(iter))
new_next = flag_bucket(clear_flag(next));
else
if (!old_node) /* Return -ENOENT if asked to replace NULL node */
return -ENOENT;
- assert(!is_removed(old_node));
- assert(!is_removal_owner(old_node));
- assert(!is_bucket(old_node));
- assert(!is_removed(new_node));
- assert(!is_removal_owner(new_node));
- assert(!is_bucket(new_node));
- assert(new_node != old_node);
+ urcu_posix_assert(!is_removed(old_node));
+ urcu_posix_assert(!is_removal_owner(old_node));
+ urcu_posix_assert(!is_bucket(old_node));
+ urcu_posix_assert(!is_removed(new_node));
+ urcu_posix_assert(!is_removal_owner(new_node));
+ urcu_posix_assert(!is_bucket(new_node));
+ urcu_posix_assert(new_node != old_node);
for (;;) {
/* Insert after node to be replaced */
if (is_removed(old_next)) {
*/
return -ENOENT;
}
- assert(old_next == clear_flag(old_next));
- assert(new_node != old_next);
+ urcu_posix_assert(old_next == clear_flag(old_next));
+ urcu_posix_assert(new_node != old_next);
/*
* REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
* flag. It is either set atomically at the same time
* (replace) or after (del).
*/
- assert(!is_removal_owner(old_next));
+ urcu_posix_assert(!is_removal_owner(old_next));
new_node->next = old_next;
/*
* Here is the whole trick for lock-free replace: we add
bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
_cds_lfht_gc_bucket(bucket, new_node);
- assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
+ urcu_posix_assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
return 0;
}
*return_node;
struct cds_lfht_node *bucket;
- assert(!is_bucket(node));
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
+ urcu_posix_assert(!is_bucket(node));
+ urcu_posix_assert(!is_removed(node));
+ urcu_posix_assert(!is_removal_owner(node));
bucket = lookup_bucket(ht, size, hash);
for (;;) {
uint32_t chain_len = 0;
iter_prev = bucket;
/* We can always skip the bucket node initially */
iter = rcu_dereference(iter_prev->next);
- assert(iter_prev->reverse_hash <= node->reverse_hash);
+ urcu_posix_assert(iter_prev->reverse_hash <= node->reverse_hash);
for (;;) {
if (caa_unlikely(is_end(iter)))
goto insert;
}
insert:
- assert(node != clear_flag(iter));
- assert(!is_removed(iter_prev));
- assert(!is_removal_owner(iter_prev));
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
- assert(iter_prev != node);
+ urcu_posix_assert(node != clear_flag(iter));
+ urcu_posix_assert(!is_removed(iter_prev));
+ urcu_posix_assert(!is_removal_owner(iter_prev));
+ urcu_posix_assert(!is_removed(iter));
+ urcu_posix_assert(!is_removal_owner(iter));
+ urcu_posix_assert(iter_prev != node);
if (!bucket_flag)
node->next = clear_flag(iter);
else
}
gc_node:
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
+ urcu_posix_assert(!is_removed(iter));
+ urcu_posix_assert(!is_removal_owner(iter));
if (is_bucket(iter))
new_next = flag_bucket(clear_flag(next));
else
return -ENOENT;
/* logically delete the node */
- assert(!is_bucket(node));
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
+ urcu_posix_assert(!is_bucket(node));
+ urcu_posix_assert(!is_removed(node));
+ urcu_posix_assert(!is_removal_owner(node));
/*
* We are first checking if the node had previously been
next = CMM_LOAD_SHARED(node->next); /* next is not dereferenced */
if (caa_unlikely(is_removed(next)))
return -ENOENT;
- assert(!is_bucket(next));
+ urcu_posix_assert(!is_bucket(next));
/*
* The del operation semantic guarantees a full memory barrier
* before the uatomic_or atomic commit of the deletion flag.
bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
_cds_lfht_gc_bucket(bucket, node);
- assert(is_removed(CMM_LOAD_SHARED(node->next)));
+ urcu_posix_assert(is_removed(CMM_LOAD_SHARED(node->next)));
/*
* Last phase: atomically exchange node->next with a version
* having "REMOVAL_OWNER_FLAG" set. If the returned node->next
{
unsigned long partition_len, start = 0;
struct partition_resize_work *work;
- int thread, ret;
- unsigned long nr_threads;
+ int ret;
+ unsigned long thread, nr_threads;
- assert(nr_cpus_mask != -1);
+ urcu_posix_assert(nr_cpus_mask != -1);
if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
goto fallback;
* partition size, up to the number of CPUs in the system.
*/
if (nr_cpus_mask > 0) {
- nr_threads = min(nr_cpus_mask + 1,
+ nr_threads = min_t(unsigned long, nr_cpus_mask + 1,
len >> MIN_PARTITION_PER_THREAD_ORDER);
} else {
nr_threads = 1;
nr_threads = thread;
break;
}
- assert(!ret);
+ urcu_posix_assert(!ret);
}
for (thread = 0; thread < nr_threads; thread++) {
ret = pthread_join(work[thread].thread_id, NULL);
- assert(!ret);
+ urcu_posix_assert(!ret);
}
free(work);
{
unsigned long j, size = 1UL << (i - 1);
- assert(i > MIN_TABLE_ORDER);
+ urcu_posix_assert(i > MIN_TABLE_ORDER);
ht->flavor->read_lock();
for (j = size + start; j < size + start + len; j++) {
struct cds_lfht_node *new_node = bucket_at(ht, j);
- assert(j >= size && j < (size << 1));
+ urcu_posix_assert(j >= size && j < (size << 1));
dbg_printf("init populate: order %lu index %lu hash %lu\n",
i, j, j);
new_node->reverse_hash = bit_reverse_ulong(j);
dbg_printf("init table: first_order %lu last_order %lu\n",
first_order, last_order);
- assert(first_order > MIN_TABLE_ORDER);
+ urcu_posix_assert(first_order > MIN_TABLE_ORDER);
for (i = first_order; i <= last_order; i++) {
unsigned long len;
{
unsigned long j, size = 1UL << (i - 1);
- assert(i > MIN_TABLE_ORDER);
+ urcu_posix_assert(i > MIN_TABLE_ORDER);
ht->flavor->read_lock();
for (j = size + start; j < size + start + len; j++) {
struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);
- assert(j >= size && j < (size << 1));
+ urcu_posix_assert(j >= size && j < (size << 1));
dbg_printf("remove entry: order %lu index %lu hash %lu\n",
i, j, j);
/* Set the REMOVED_FLAG to freeze the ->next for gc */
void fini_table(struct cds_lfht *ht,
unsigned long first_order, unsigned long last_order)
{
- long i;
- unsigned long free_by_rcu_order = 0;
+ unsigned long free_by_rcu_order = 0, i;
dbg_printf("fini table: first_order %lu last_order %lu\n",
first_order, last_order);
- assert(first_order > MIN_TABLE_ORDER);
+ urcu_posix_assert(first_order > MIN_TABLE_ORDER);
for (i = last_order; i >= first_order; i--) {
unsigned long len;
}
}
+/*
+ * Never called with size < 1.
+ */
static
void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
{
struct cds_lfht_node *prev, *node;
unsigned long order, len, i;
+ int bucket_order;
cds_lfht_alloc_bucket_table(ht, 0);
node->next = flag_bucket(get_end());
node->reverse_hash = 0;
- for (order = 1; order < cds_lfht_get_count_order_ulong(size) + 1; order++) {
+ bucket_order = cds_lfht_get_count_order_ulong(size);
+ urcu_posix_assert(bucket_order >= 0);
+
+ for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
len = 1UL << (order - 1);
cds_lfht_alloc_bucket_table(ht, order);
node->reverse_hash = bit_reverse_ulong(len + i);
/* insert after prev */
- assert(is_bucket(prev->next));
+ urcu_posix_assert(is_bucket(prev->next));
node->next = prev->next;
prev->next = flag_bucket(node);
}
}
}
+#if (CAA_BITS_PER_LONG > 32)
+/*
+ * For 64-bit architectures, with max number of buckets small enough not to
+ * use the entire 64-bit memory mapping space (and allowing a fair number of
+ * hash table instances), use the mmap allocator, which is faster. Otherwise,
+ * fall back to the order allocator.
+ */
+static
+const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+ if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
+ return &cds_lfht_mm_mmap;
+ else
+ return &cds_lfht_mm_order;
+}
+#else
+/*
+ * For 32-bit architectures, use the order allocator.
+ */
+static
+const struct cds_lfht_mm_type *get_mm_type(
+ unsigned long max_nr_buckets __attribute__((unused)))
+{
+ return &cds_lfht_mm_order;
+}
+#endif
+
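In other words, the default is the mmap allocator only for bounded tables of at most 2^32 buckets on 64-bit; unbounded tables (max_nr_buckets == 0) and 32-bit builds keep the order allocator. A standalone sketch of that decision rule (the enum and function names are stand-ins, not urcu symbols):

	#include <stdio.h>

	enum sketch_mm { SKETCH_MM_ORDER, SKETCH_MM_MMAP };

	static enum sketch_mm sketch_pick_mm(unsigned long max_nr_buckets)
	{
		if (sizeof(unsigned long) > 4		/* 64-bit */
				&& max_nr_buckets
				&& max_nr_buckets <= (1ULL << 32))
			return SKETCH_MM_MMAP;
		return SKETCH_MM_ORDER;	/* 32-bit, unbounded (0), or very large tables */
	}

	int main(void)
	{
		printf("%d\n", sketch_pick_mm(0));		/* order: unbounded table */
		printf("%d\n", sketch_pick_mm(1UL << 20));	/* mmap on 64-bit */
		return 0;
	}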
+void cds_lfht_node_init_deleted(struct cds_lfht_node *node)
+{
+ cds_lfht_node_init(node);
+ node->next = flag_removed(NULL);
+}
+
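The new cds_lfht_node_init_deleted() initializes a node with the REMOVED flag already set in its next pointer, so cds_lfht_is_node_deleted() reports it as deleted before the node is ever added to a table. A hypothetical usage sketch (struct my_entry and its init function are illustrations; the declaration is assumed to accompany this definition in <urcu/rculfhash.h>):

	#include <urcu/rculfhash.h>

	struct my_entry {
		struct cds_lfht_node ht_node;
		int value;
	};

	static void my_entry_init(struct my_entry *e, int value)
	{
		/* Mark the node "deleted" before it is ever inserted, so callers
		 * checking cds_lfht_is_node_deleted() can treat "never added"
		 * and "already removed" uniformly. */
		cds_lfht_node_init_deleted(&e->ht_node);
		e->value = value;
	}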
struct cds_lfht *_cds_lfht_new(unsigned long init_size,
unsigned long min_nr_alloc_buckets,
unsigned long max_nr_buckets,
/*
* Memory management plugin default.
*/
- if (!mm) {
- if (CAA_BITS_PER_LONG > 32
- && max_nr_buckets
- && max_nr_buckets <= (1ULL << 32)) {
- /*
- * For 64-bit architectures, with max number of
- * buckets small enough not to use the entire
- * 64-bit memory mapping space (and allowing a
- * fair number of hash table instances), use the
- * mmap allocator, which is faster than the
- * order allocator.
- */
- mm = &cds_lfht_mm_mmap;
- } else {
- /*
- * The fallback is to use the order allocator.
- */
- mm = &cds_lfht_mm_order;
- }
- }
+ if (!mm)
+ mm = get_mm_type(max_nr_buckets);
/* max_nr_buckets == 0 for order based mm means infinite */
if (mm == &cds_lfht_mm_order && !max_nr_buckets)
init_size = min(init_size, max_nr_buckets);
ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets);
- assert(ht);
- assert(ht->mm == mm);
- assert(ht->bucket_at == mm->bucket_at);
+ urcu_posix_assert(ht);
+ urcu_posix_assert(ht->mm == mm);
+ urcu_posix_assert(ht->bucket_at == mm->bucket_at);
ht->flags = flags;
ht->flavor = flavor;
break;
}
next = rcu_dereference(node->next);
- assert(node == clear_flag(node));
+ urcu_posix_assert(node == clear_flag(node));
if (caa_likely(!is_removed(next))
&& !is_bucket(next)
&& node->reverse_hash == reverse_hash
}
node = clear_flag(next);
}
- assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+ urcu_posix_assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
iter->node = node;
iter->next = next;
}
-void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
+void cds_lfht_next_duplicate(struct cds_lfht *ht __attribute__((unused)),
+ cds_lfht_match_fct match,
const void *key, struct cds_lfht_iter *iter)
{
struct cds_lfht_node *node, *next;
}
node = clear_flag(next);
}
- assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+ urcu_posix_assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
iter->node = node;
iter->next = next;
}
-void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+void cds_lfht_next(struct cds_lfht *ht __attribute__((unused)),
+ struct cds_lfht_iter *iter)
{
struct cds_lfht_node *node, *next;
}
node = clear_flag(next);
}
- assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+ urcu_posix_assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
iter->node = node;
iter->next = next;
}
return ret;
}
-int cds_lfht_is_node_deleted(struct cds_lfht_node *node)
+int cds_lfht_is_node_deleted(const struct cds_lfht_node *node)
{
return is_removed(CMM_LOAD_SHARED(node->next));
}
node = clear_flag(node)->next;
if (!is_bucket(node))
return -EPERM;
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
+ urcu_posix_assert(!is_removed(node));
+ urcu_posix_assert(!is_removal_owner(node));
} while (!is_end(node));
/*
* size accessed without rcu_dereference because hash table is
node = bucket_at(ht, i);
dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
i, i, bit_reverse_ulong(node->reverse_hash));
- assert(is_bucket(node->next));
+ urcu_posix_assert(is_bucket(node->next));
}
for (order = cds_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
new_order = cds_lfht_get_count_order_ulong(new_size);
dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
- assert(new_size > old_size);
+ urcu_posix_assert(new_size > old_size);
init_table(ht, old_order + 1, new_order);
}
new_order = cds_lfht_get_count_order_ulong(new_size);
dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
- assert(new_size < old_size);
+ urcu_posix_assert(new_size < old_size);
/* Remove and unlink all bucket nodes to remove. */
fini_table(ht, new_order + 1, old_order);
__cds_lfht_resize_lazy_launch(ht);
}
-static void cds_lfht_before_fork(void *priv)
+static void cds_lfht_before_fork(void *priv __attribute__((unused)))
{
if (cds_lfht_workqueue_atfork_nesting++)
return;
urcu_workqueue_pause_worker(cds_lfht_workqueue);
}
-static void cds_lfht_after_fork_parent(void *priv)
+static void cds_lfht_after_fork_parent(void *priv __attribute__((unused)))
{
if (--cds_lfht_workqueue_atfork_nesting)
return;
mutex_unlock(&cds_lfht_fork_mutex);
}
-static void cds_lfht_after_fork_child(void *priv)
+static void cds_lfht_after_fork_child(void *priv __attribute__((unused)))
{
if (--cds_lfht_workqueue_atfork_nesting)
return;
.after_fork_child = cds_lfht_after_fork_child,
};
-/* Block all signals to ensure we don't disturb the application. */
-static void cds_lfht_worker_init(struct urcu_workqueue *workqueue,
- void *priv)
+/*
+ * Block all signals for the workqueue worker thread to ensure we don't
+ * disturb the application. The SIGRCU signal needs to be unblocked for
+ * the urcu-signal flavor.
+ */
+static void cds_lfht_worker_init(
+ struct urcu_workqueue *workqueue __attribute__((unused)),
+ void *priv __attribute__((unused)))
{
int ret;
sigset_t mask;
- /* Block signal for entire process, so only our thread processes it. */
ret = sigfillset(&mask);
if (ret)
urcu_die(errno);
- ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
+ ret = sigdelset(&mask, SIGRCU);
+ if (ret)
+ urcu_die(errno);
+ ret = pthread_sigmask(SIG_SETMASK, &mask, NULL);
if (ret)
urcu_die(ret);
}
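With SIG_SETMASK the computed mask replaces the worker thread's signal mask outright, whereas the previous SIG_BLOCK call only added to the inherited mask and blocked SIGRCU along with everything else. After sigfillset() plus sigdelset(&mask, SIGRCU), the worker blocks every signal except SIGRCU (the signal number pulled in through the new <urcu/static/urcu-signal-nr.h> include), which the urcu-signal flavor relies on, as the updated comment notes.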