X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=tests%2Frcutorture.h;h=b42b8ab7c828a52fd06ed9ddc187e498b3589d30;hp=7d4948e59626b486524af30fa4b0dcc5f9bfc02b;hb=3c24913f970c2dc1dab0a00e3ee9683250667f82;hpb=6ee91d83a7a5b33160158f57f94dd75e6d0a67ed

diff --git a/tests/rcutorture.h b/tests/rcutorture.h
index 7d4948e..b42b8ab 100644
--- a/tests/rcutorture.h
+++ b/tests/rcutorture.h
@@ -65,6 +65,9 @@
  * Test variables.
  */
 
+#include <stdlib.h>
+#include "../urcu-call-rcu.h"
+
 DEFINE_PER_THREAD(long long, n_reads_pt);
 DEFINE_PER_THREAD(long long, n_updates_pt);
 
@@ -77,7 +80,7 @@ char argsbuf[64];
 #define GOFLAG_RUN 1
 #define GOFLAG_STOP 2
 
-int goflag __attribute__((__aligned__(CACHE_LINE_SIZE))) = GOFLAG_INIT;
+int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))) = GOFLAG_INIT;
 
 #define RCU_READ_RUN 1000
 
@@ -92,6 +95,12 @@ int goflag __attribute__((__aligned__(CACHE_LINE_SIZE))) = GOFLAG_INIT;
 #define rcu_read_unlock_nest()
 #endif /* #else #ifdef RCU_READ_NESTABLE */
 
+#ifdef TORTURE_QSBR
+#define mark_rcu_quiescent_state	rcu_quiescent_state
+#define put_thread_offline		rcu_thread_offline
+#define put_thread_online		rcu_thread_online
+#endif
+
 #ifndef mark_rcu_quiescent_state
 #define mark_rcu_quiescent_state() do ; while (0)
 #endif /* #ifdef mark_rcu_quiescent_state */
@@ -116,7 +125,7 @@ void *rcu_read_perf_test(void *arg)
 
 	rcu_register_thread();
 	run_on(me);
-	atomic_inc(&nthreadsrunning);
+	uatomic_inc(&nthreadsrunning);
 	while (goflag == GOFLAG_INIT)
 		poll(NULL, 0, 1);
 	mark_rcu_quiescent_state();
@@ -141,7 +150,17 @@ void *rcu_update_perf_test(void *arg)
 {
 	long long n_updates_local = 0;
 
-	atomic_inc(&nthreadsrunning);
+	if ((random() & 0xf00) == 0) {
+		struct call_rcu_data *crdp;
+
+		crdp = create_call_rcu_data(0);
+		if (crdp != NULL) {
+			fprintf(stderr,
+				"Using per-thread call_rcu() worker.\n");
+			set_thread_call_rcu_data(crdp);
+		}
+	}
+	uatomic_inc(&nthreadsrunning);
 	while (goflag == GOFLAG_INIT)
 		poll(NULL, 0, 1);
 	while (goflag == GOFLAG_RUN) {
@@ -156,7 +175,7 @@ void perftestinit(void)
 {
 	init_per_thread(n_reads_pt, 0LL);
 	init_per_thread(n_updates_pt, 0LL);
-	atomic_set(&nthreadsrunning, 0);
+	uatomic_set(&nthreadsrunning, 0);
 }
 
 void perftestrun(int nthreads, int nreaders, int nupdaters)
@@ -164,15 +183,15 @@ void perftestrun(int nthreads, int nreaders, int nupdaters)
 	int t;
 	int duration = 1;
 
-	smp_mb();
-	while (atomic_read(&nthreadsrunning) < nthreads)
+	cmm_smp_mb();
+	while (uatomic_read(&nthreadsrunning) < nthreads)
 		poll(NULL, 0, 1);
 	goflag = GOFLAG_RUN;
-	smp_mb();
+	cmm_smp_mb();
 	sleep(duration);
-	smp_mb();
+	cmm_smp_mb();
 	goflag = GOFLAG_STOP;
-	smp_mb();
+	cmm_smp_mb();
 	wait_all_threads();
 	for_each_thread(t) {
 		n_reads += per_thread(n_reads_pt, t);
@@ -290,10 +309,30 @@ void *rcu_read_stress_test(void *arg)
 	return (NULL);
 }
 
+static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;
+
+void rcu_update_stress_test_rcu(struct rcu_head *head)
+{
+	if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
+		perror("pthread_mutex_lock");
+		exit(-1);
+	}
+	if (pthread_cond_signal(&call_rcu_test_cond) != 0) {
+		perror("pthread_cond_signal");
+		exit(-1);
+	}
+	if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
+		perror("pthread_mutex_unlock");
+		exit(-1);
+	}
+}
+
 void *rcu_update_stress_test(void *arg)
 {
 	int i;
 	struct rcu_stress *p;
+	struct rcu_head rh;
 
 	while (goflag == GOFLAG_INIT)
 		poll(NULL, 0, 1);
@@ -303,7 +342,7 @@ void *rcu_update_stress_test(void *arg)
 			i = 0;
 		p = &rcu_stress_array[i];
 		p->mbtest = 0;
-		smp_mb();
+		cmm_smp_mb();
 		p->pipe_count = 0;
 		p->mbtest = 1;
 		rcu_assign_pointer(rcu_stress_current, p);
@@ -311,7 +350,24 @@
 		for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
 			if (i != rcu_stress_idx)
 				rcu_stress_array[i].pipe_count++;
-		synchronize_rcu();
+		if (n_updates & 0x1)
+			synchronize_rcu();
+		else {
+			if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
+				perror("pthread_mutex_lock");
+				exit(-1);
+			}
+			call_rcu(&rh, rcu_update_stress_test_rcu);
+			if (pthread_cond_wait(&call_rcu_test_cond,
+					      &call_rcu_test_mutex) != 0) {
+				perror("pthread_cond_wait");
+				exit(-1);
+			}
+			if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
+				perror("pthread_mutex_unlock");
+				exit(-1);
+			}
+		}
 		n_updates++;
 	}
 	return NULL;
@@ -319,6 +375,16 @@ void *rcu_update_stress_test(void *arg)
 
 void *rcu_fake_update_stress_test(void *arg)
 {
+	if ((random() & 0xf00) == 0) {
+		struct call_rcu_data *crdp;
+
+		crdp = create_call_rcu_data(0);
+		if (crdp != NULL) {
+			fprintf(stderr,
+				"Using per-thread call_rcu() worker.\n");
+			set_thread_call_rcu_data(crdp);
+		}
+	}
 	while (goflag == GOFLAG_INIT)
 		poll(NULL, 0, 1);
 	while (goflag == GOFLAG_RUN) {
@@ -349,13 +415,13 @@ void stresstest(int nreaders)
 	create_thread(rcu_update_stress_test, NULL);
 	for (i = 0; i < 5; i++)
 		create_thread(rcu_fake_update_stress_test, NULL);
-	smp_mb();
+	cmm_smp_mb();
 	goflag = GOFLAG_RUN;
-	smp_mb();
+	cmm_smp_mb();
 	sleep(10);
-	smp_mb();
+	cmm_smp_mb();
 	goflag = GOFLAG_STOP;
-	smp_mb();
+	cmm_smp_mb();
 	wait_all_threads();
 	for_each_thread(t)
 		n_reads += per_thread(n_reads_pt, t);
@@ -390,6 +456,12 @@ int main(int argc, char *argv[])
 	smp_init();
 	//rcu_init();
 
+	srandom(time(NULL));
+	if (random() & 0x100) {
+		fprintf(stderr, "Allocating per-CPU call_rcu threads.\n");
+		if (create_all_cpu_call_rcu_data(0))
+			perror("create_all_cpu_call_rcu_data");
+	}
 #ifdef DEBUG_YIELD
 	yield_active |= YIELD_READ;
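
Note on the pattern above: the patch turns the asynchronous call_rcu() interface
into a synchronous wait by taking a mutex before posting the callback, then
blocking on a condition variable that the callback signals once the grace period
has elapsed. This lets rcu_update_stress_test() exercise the call_rcu() path on
every other update while still waiting for a full grace period, exactly as
synchronize_rcu() would. The sketch below factors that same pattern into a
standalone helper; it is illustrative only, the helper name
wait_for_grace_period() is hypothetical, and it assumes the same
urcu-call-rcu.h interface the patch includes.

#include <pthread.h>
#include "../urcu-call-rcu.h"	/* call_rcu(), struct rcu_head (as in the patch) */

static pthread_mutex_t grace_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t grace_cond = PTHREAD_COND_INITIALIZER;

/* call_rcu() callback: a grace period has elapsed, so wake the waiter. */
static void grace_cb(struct rcu_head *head)
{
	pthread_mutex_lock(&grace_mutex);
	pthread_cond_signal(&grace_cond);
	pthread_mutex_unlock(&grace_mutex);
}

/*
 * Hypothetical helper mirroring rcu_update_stress_test(): block the
 * caller until a grace period has elapsed, using call_rcu() instead of
 * synchronize_rcu(). Holding grace_mutex across the call_rcu() ensures
 * the callback cannot signal before the caller is waiting, because
 * pthread_cond_wait() releases the mutex atomically. Like the patch,
 * this sketch has no predicate flag, so a spurious wakeup would end the
 * wait early; production code would loop on a "done" flag. Error
 * checking of the pthread calls is omitted for brevity.
 */
static void wait_for_grace_period(void)
{
	struct rcu_head rh;	/* stack allocation is safe: the callback
				 * has run by the time this returns */

	pthread_mutex_lock(&grace_mutex);
	call_rcu(&rh, grace_cb);
	pthread_cond_wait(&grace_cond, &grace_mutex);
	pthread_mutex_unlock(&grace_mutex);
}

The single global mutex/condvar pair serializes waiters, which is fine for a
test harness with one updater thread; concurrent updaters would each need
their own wait state, e.g. embedded alongside the rcu_head.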