*/
#define _GNU_SOURCE
+#include "../config.h"
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
#include <assert.h>
#include <sched.h>
#include <errno.h>
+#include <signal.h>
#ifdef __linux__
#include <syscall.h>
#define DEFAULT_MIN_ALLOC_SIZE 1
#define DEFAULT_RAND_POOL 1000000
+/*
+ * Note: for hash tables targeting production environments, the hash
+ * seed should be a random value to provide protection against
+ * denial-of-service attacks. We keep it a fixed value within this test
+ * program so that identical benchmark runs can be compared.
+ */
#define TEST_HASH_SEED 0x42UL
/* Make this big enough to include the POWER5+ L3 cacheline size of 256B */
pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
+#ifndef HAVE_CPU_SET_T
+typedef unsigned long cpu_set_t;
+# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
+# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
+#endif
+
static void set_affinity(void)
{
cpu_set_t mask;
if (!use_affinity)
return;
+#if HAVE_SCHED_SETAFFINITY
ret = pthread_mutex_lock(&affinity_mutex);
if (ret) {
perror("Error in pthread mutex lock");
}
CPU_ZERO(&mask);
CPU_SET(cpu, &mask);
- sched_setaffinity(0, sizeof(mask), &mask);
+#if SCHED_SETAFFINITY_ARGS == 2
+ sched_setaffinity(0, &mask);
+#else
+ sched_setaffinity(0, sizeof(mask), &mask);
+#endif
+#endif /* HAVE_SCHED_SETAFFINITY */
}
static enum {
rcu_register_thread();
for (;;) {
- unsigned long count, removed;
+ unsigned long count;
long approx_before, approx_after;
ssize_t len;
char buf[1];
printf("Counting nodes... ");
fflush(stdout);
rcu_read_lock();
- cds_lfht_count_nodes(test_ht, &approx_before, &count, &removed,
+ cds_lfht_count_nodes(test_ht, &approx_before, &count,
&approx_after);
rcu_read_unlock();
printf("done.\n");
printf("Approximation before node accounting: %ld nodes.\n",
approx_before);
printf("Accounting of nodes in the hash table: "
- "%lu nodes + %lu logically removed.\n",
- count, removed);
+ "%lu nodes.\n",
+ count);
printf("Approximation after node accounting: %ld nodes.\n",
approx_after);
}
cds_lfht_test_lookup(test_ht,
(void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
sizeof(void *), &iter);
- ret = cds_lfht_del(test_ht, &iter);
+ ret = cds_lfht_del(test_ht, cds_lfht_iter_get_node(&iter));
rcu_read_unlock();
if (ret == 0) {
node = cds_lfht_iter_get_test_node(&iter);
cds_lfht_for_each_entry(ht, &iter, node, node) {
int ret;
- ret = cds_lfht_del(test_ht, &iter);
+ ret = cds_lfht_del(test_ht, cds_lfht_iter_get_node(&iter));
assert(!ret);
call_rcu(&node->head, free_node_cb);
count++;
struct wr_count *count_writer;
unsigned long long tot_reads = 0, tot_writes = 0,
tot_add = 0, tot_add_exist = 0, tot_remove = 0;
- unsigned long count, removed;
+ unsigned long count;
long approx_before, approx_after;
int i, a, ret;
struct sigaction act;
count_writer = malloc(sizeof(*count_writer) * nr_writers);
err = create_all_cpu_call_rcu_data(0);
- assert(!err);
+ if (err) {
+ printf("Per-CPU call_rcu() worker threads unavailable. Using default global worker thread.\n");
+ }
if (memory_backend) {
test_ht = _cds_lfht_new(init_hash_size, min_hash_alloc_size,
rcu_thread_online();
rcu_read_lock();
printf("Counting nodes... ");
- cds_lfht_count_nodes(test_ht, &approx_before, &count, &removed,
- &approx_after);
+ cds_lfht_count_nodes(test_ht, &approx_before, &count, &approx_after);
printf("done.\n");
test_delete_all_nodes(test_ht);
rcu_read_unlock();
rcu_thread_offline();
- if (count || removed) {
+ if (count) {
printf("Approximation before node accounting: %ld nodes.\n",
approx_before);
printf("Nodes deleted from hash table before destroy: "
- "%lu nodes + %lu logically removed.\n",
- count, removed);
+ "%lu nodes.\n",
+ count);
printf("Approximation after node accounting: %ld nodes.\n",
approx_after);
}