* Test variables.
*/
+#include <stdlib.h>
+
DEFINE_PER_THREAD(long long, n_reads_pt);
DEFINE_PER_THREAD(long long, n_updates_pt);
#define GOFLAG_RUN 1
#define GOFLAG_STOP 2
-int goflag __attribute__((__aligned__(CACHE_LINE_SIZE))) = GOFLAG_INIT;
+int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))) = GOFLAG_INIT;
#define RCU_READ_RUN 1000
#define rcu_read_unlock_nest()
#endif /* #else #ifdef RCU_READ_NESTABLE */
+#ifdef TORTURE_QSBR
+#define mark_rcu_quiescent_state rcu_quiescent_state
+#define put_thread_offline rcu_thread_offline
+#define put_thread_online rcu_thread_online
+#endif
+
#ifndef mark_rcu_quiescent_state
#define mark_rcu_quiescent_state() do ; while (0)
#endif /* #ifdef mark_rcu_quiescent_state */
void *rcu_read_perf_test(void *arg)
{
+ struct call_rcu_data *crdp;
int i;
int me = (long)arg;
long long n_reads_local = 0;
rcu_register_thread();
run_on(me);
- atomic_inc(&nthreadsrunning);
+ uatomic_inc(&nthreadsrunning);
while (goflag == GOFLAG_INIT)
poll(NULL, 0, 1);
mark_rcu_quiescent_state();
}
__get_thread_var(n_reads_pt) += n_reads_local;
put_thread_offline();
+ crdp = get_thread_call_rcu_data();
+ set_thread_call_rcu_data(NULL);
+ call_rcu_data_free(crdp);
rcu_unregister_thread();
return (NULL);
{
long long n_updates_local = 0;
- atomic_inc(&nthreadsrunning);
+ if ((random() & 0xf00) == 0) {
+ struct call_rcu_data *crdp;
+
+ crdp = create_call_rcu_data(0);
+ if (crdp != NULL) {
+ fprintf(stderr,
+ "Using per-thread call_rcu() worker.\n");
+ set_thread_call_rcu_data(crdp);
+ }
+ }
+ uatomic_inc(&nthreadsrunning);
while (goflag == GOFLAG_INIT)
poll(NULL, 0, 1);
while (goflag == GOFLAG_RUN) {
{
init_per_thread(n_reads_pt, 0LL);
init_per_thread(n_updates_pt, 0LL);
- atomic_set(&nthreadsrunning, 0);
+ uatomic_set(&nthreadsrunning, 0);
}
void perftestrun(int nthreads, int nreaders, int nupdaters)
int t;
int duration = 1;
- smp_mb();
- while (atomic_read(&nthreadsrunning) < nthreads)
+ cmm_smp_mb();
+ while (uatomic_read(&nthreadsrunning) < nthreads)
poll(NULL, 0, 1);
goflag = GOFLAG_RUN;
- smp_mb();
+ cmm_smp_mb();
sleep(duration);
- smp_mb();
+ cmm_smp_mb();
goflag = GOFLAG_STOP;
- smp_mb();
+ cmm_smp_mb();
wait_all_threads();
for_each_thread(t) {
n_reads += per_thread(n_reads_pt, t);
(double)n_reads),
((duration * 1000*1000*1000.*(double)nupdaters) /
(double)n_updates));
+ if (get_cpu_call_rcu_data(0)) {
+ fprintf(stderr, "Deallocating per-CPU call_rcu threads.\n");
+ free_all_cpu_call_rcu_data();
+ }
exit(0);
}
return (NULL);
}
+/* Handshake state: the updater thread blocks on call_rcu_test_cond
+ * (holding call_rcu_test_mutex) until the grace-period callback below
+ * signals it. */
+static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;
+
+/*
+ * RCU callback posted via call_rcu() by the stress-test updater.
+ * Runs after a grace period elapses; wakes the updater, which is
+ * blocked in pthread_cond_wait() on call_rcu_test_cond.  The rcu_head
+ * argument is unused.  Any pthread primitive failure aborts the test.
+ *
+ * NOTE(review): pthread_* functions return an error code rather than
+ * setting errno, so perror() here may print an unrelated message on
+ * failure — strerror(ret) would be more accurate.
+ */
+void rcu_update_stress_test_rcu(struct rcu_head *head)
+{
+	if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
+		perror("pthread_mutex_lock");
+		exit(-1);
+	}
+	if (pthread_cond_signal(&call_rcu_test_cond) != 0) {
+		perror("pthread_cond_signal");
+		exit(-1);
+	}
+	if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
+		perror("pthread_mutex_unlock");
+		exit(-1);
+	}
+}
+
void *rcu_update_stress_test(void *arg)
{
int i;
struct rcu_stress *p;
+ struct rcu_head rh;
while (goflag == GOFLAG_INIT)
poll(NULL, 0, 1);
i = 0;
p = &rcu_stress_array[i];
p->mbtest = 0;
- smp_mb();
+ cmm_smp_mb();
p->pipe_count = 0;
p->mbtest = 1;
rcu_assign_pointer(rcu_stress_current, p);
for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
if (i != rcu_stress_idx)
rcu_stress_array[i].pipe_count++;
- synchronize_rcu();
+ if (n_updates & 0x1)
+ synchronize_rcu();
+ else {
+ if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
+ perror("pthread_mutex_lock");
+ exit(-1);
+ }
+ call_rcu(&rh, rcu_update_stress_test_rcu);
+ if (pthread_cond_wait(&call_rcu_test_cond,
+ &call_rcu_test_mutex) != 0) {
+ perror("pthread_cond_wait");
+ exit(-1);
+ }
+ if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
+ perror("pthread_mutex_unlock");
+ exit(-1);
+ }
+ }
n_updates++;
}
return NULL;
void *rcu_fake_update_stress_test(void *arg)
{
+ if ((random() & 0xf00) == 0) {
+ struct call_rcu_data *crdp;
+
+ crdp = create_call_rcu_data(0);
+ if (crdp != NULL) {
+ fprintf(stderr,
+ "Using per-thread call_rcu() worker.\n");
+ set_thread_call_rcu_data(crdp);
+ }
+ }
while (goflag == GOFLAG_INIT)
poll(NULL, 0, 1);
while (goflag == GOFLAG_RUN) {
create_thread(rcu_update_stress_test, NULL);
for (i = 0; i < 5; i++)
create_thread(rcu_fake_update_stress_test, NULL);
- smp_mb();
+ cmm_smp_mb();
goflag = GOFLAG_RUN;
- smp_mb();
+ cmm_smp_mb();
sleep(10);
- smp_mb();
+ cmm_smp_mb();
goflag = GOFLAG_STOP;
- smp_mb();
+ cmm_smp_mb();
wait_all_threads();
for_each_thread(t)
n_reads += per_thread(n_reads_pt, t);
printf(" %lld", sum);
}
printf("\n");
+ if (get_cpu_call_rcu_data(0)) {
+ fprintf(stderr, "Deallocating per-CPU call_rcu threads.\n");
+ free_all_cpu_call_rcu_data();
+ }
exit(0);
}
smp_init();
//rcu_init();
+ srandom(time(NULL));
+ if (random() & 0x100) {
+ fprintf(stderr, "Allocating per-CPU call_rcu threads.\n");
+ if (create_all_cpu_call_rcu_data(0))
+ perror("create_all_cpu_call_rcu_data");
+ }
#ifdef DEBUG_YIELD
yield_active |= YIELD_READ;