#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
+#include <urcu/static/urcu-signal-nr.h>
#include <rculfhash-internal.h>
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include "workqueue.h"
#include "urcu-die.h"
+#include "urcu-utils.h"
/*
* Split-counters lazily update the global counter each 1024
* addition/removal. It automatically keeps track of resize required.
*/
static
void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
- unsigned long split_count;
+ unsigned long split_count, count;
int index;
- long count;
if (caa_unlikely(!ht->split_count))
return;
if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
return;
- dbg_printf("add set global %ld\n", count);
+ dbg_printf("add set global %lu\n", count);
cds_lfht_resize_lazy_count(ht, size,
count >> (CHAIN_LEN_TARGET - 1));
}
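
For context on the split-counter comment above: each per-index counter is bumped locally and only folded into the shared ht->count once every 1UL << COUNT_COMMIT_ORDER (1024) operations, which is also why the count is naturally an unsigned long and printed with %lu. A minimal stand-alone sketch of that idea, with illustrative names (COMMIT_ORDER, global_count and split_counter_add are not liburcu API):

#include <urcu/uatomic.h>

#define COMMIT_ORDER	10	/* fold into the shared counter every 1024 ops */

static unsigned long global_count;

struct split_counter {
	unsigned long add;	/* one such counter per index in the real table */
};

static
void split_counter_add(struct split_counter *c)
{
	unsigned long local;

	local = uatomic_add_return(&c->add, 1);
	/* Fast path: nothing else to do until the next multiple of 1024. */
	if (local & ((1UL << COMMIT_ORDER) - 1))
		return;
	/* Slow path: commit a whole batch to the shared counter at once. */
	(void) uatomic_add_return(&global_count, 1UL << COMMIT_ORDER);
}
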
static
void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
- unsigned long split_count;
+ unsigned long split_count, count;
int index;
- long count;
if (caa_unlikely(!ht->split_count))
return;
}
static
-int is_removed(struct cds_lfht_node *node)
+int is_removed(const struct cds_lfht_node *node)
{
return ((unsigned long) node) & REMOVED_FLAG;
}
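
is_removed() relies on the usual pointer-tagging trick: cds_lfht_node pointers are word-aligned, so their low bits are free to carry flags such as REMOVED_FLAG, and const-qualifying the parameter costs nothing. A stand-alone illustration of the same technique (TAG_REMOVED and the helpers below are illustrative, not the hash table's real flag handling):

#define TAG_REMOVED	(1UL << 0)	/* low bit is unused on aligned pointers */

struct tagged_node {
	struct tagged_node *next;
};

static struct tagged_node *tag_removed(struct tagged_node *p)
{
	return (struct tagged_node *) ((unsigned long) p | TAG_REMOVED);
}

static int tagged_is_removed(const struct tagged_node *p)
{
	return ((unsigned long) p) & TAG_REMOVED;
}

static struct tagged_node *tag_clear(struct tagged_node *p)
{
	return (struct tagged_node *) ((unsigned long) p & ~TAG_REMOVED);
}
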
{
unsigned long partition_len, start = 0;
struct partition_resize_work *work;
- int thread, ret;
- unsigned long nr_threads;
+ int ret;
+ unsigned long thread, nr_threads;
assert(nr_cpus_mask != -1);
if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
* partition size, up to the number of CPUs in the system.
*/
if (nr_cpus_mask > 0) {
- nr_threads = min(nr_cpus_mask + 1,
+ nr_threads = min_t(unsigned long, nr_cpus_mask + 1,
len >> MIN_PARTITION_PER_THREAD_ORDER);
} else {
nr_threads = 1;
void fini_table(struct cds_lfht *ht,
unsigned long first_order, unsigned long last_order)
{
- long i;
- unsigned long free_by_rcu_order = 0;
+ unsigned long free_by_rcu_order = 0, i;
dbg_printf("fini table: first_order %lu last_order %lu\n",
first_order, last_order);
}
}
+/*
+ * Never called with size < 1.
+ */
static
void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
{
struct cds_lfht_node *prev, *node;
unsigned long order, len, i;
+ int bucket_order;
cds_lfht_alloc_bucket_table(ht, 0);
node->next = flag_bucket(get_end());
node->reverse_hash = 0;
- for (order = 1; order < cds_lfht_get_count_order_ulong(size) + 1; order++) {
+ bucket_order = cds_lfht_get_count_order_ulong(size);
+ assert(bucket_order >= 0);
+
+ for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
len = 1UL << (order - 1);
cds_lfht_alloc_bucket_table(ht, order);
return ret;
}
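
The bucket allocation loop in cds_lfht_create_bucket above grows the table one order at a time: order 0 holds bucket 0, and each subsequent order holds 1UL << (order - 1) new buckets, so a power-of-two size is covered exactly. A throwaway sketch (not liburcu API) that prints this layout for size = 8:

#include <stdio.h>

/* Illustrative only: assumes size is a power of two. */
static unsigned int count_order(unsigned long size)
{
	unsigned int order = 0;

	while ((1UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 8, order, len;
	unsigned int top = count_order(size);

	printf("order 0: 1 bucket (index 0)\n");
	for (order = 1; order < (unsigned long) top + 1; order++) {
		len = 1UL << (order - 1);
		printf("order %lu: %lu buckets (indexes %lu..%lu)\n",
			order, len, len, 2 * len - 1);
	}
	return 0;
}
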
-int cds_lfht_is_node_deleted(struct cds_lfht_node *node)
+int cds_lfht_is_node_deleted(const struct cds_lfht_node *node)
{
return is_removed(CMM_LOAD_SHARED(node->next));
}
.after_fork_child = cds_lfht_after_fork_child,
};
-/* Block all signals to ensure we don't disturb the application. */
+/*
+ * Block all signals for the workqueue worker thread to ensure we don't
+ * disturb the application. The SIGRCU signal needs to be unblocked for
+ * the urcu-signal flavor.
+ */
static void cds_lfht_worker_init(struct urcu_workqueue *workqueue,
void *priv)
{
int ret;
sigset_t mask;
- /* Block signal for entire process, so only our thread processes it. */
ret = sigfillset(&mask);
if (ret)
urcu_die(errno);
- ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
+ ret = sigdelset(&mask, SIGRCU);
+ if (ret)
+ urcu_die(errno);
+ ret = pthread_sigmask(SIG_SETMASK, &mask, NULL);
if (ret)
urcu_die(ret);
}
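
The SIG_BLOCK to SIG_SETMASK change matters because SIG_BLOCK only adds to the mask the worker thread inherited, so a SIGRCU that was already blocked in the creating thread would stay blocked despite the sigdelset(); SIG_SETMASK installs exactly "everything except SIGRCU". The same pattern as a stand-alone sketch for any worker thread that must keep one signal deliverable (SIGUSR1 stands in for SIGRCU here, and worker_block_all_but is an illustrative helper):

#include <pthread.h>
#include <signal.h>
#include <stdlib.h>

static void worker_block_all_but(int keep_sig)
{
	sigset_t mask;

	/* Start from "block everything"... */
	if (sigfillset(&mask))
		abort();
	/* ...carve out the one signal that must stay deliverable... */
	if (sigdelset(&mask, keep_sig))
		abort();
	/* ...and replace the inherited mask outright. */
	if (pthread_sigmask(SIG_SETMASK, &mask, NULL))
		abort();
}

/* e.g. called at the top of the worker thread: worker_block_all_but(SIGUSR1); */
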