/*
 * rcutorture.h: simple user-level performance/stress test of RCU.
 *
 * Usage:
 *	./rcu <nreaders> rperf [ <cpustride> ]
 *		Run a read-side performance test with the specified
 *		number of readers spaced by <cpustride>.
 *		Thus "./rcu 16 rperf 2" would run 16 readers on even-numbered
 *		CPUs from 0 to 30.
 *	./rcu <nupdaters> uperf [ <cpustride> ]
 *		Run an update-side performance test with the specified
 *		number of updaters and specified CPU spacing.
 *	./rcu <nreaders> perf [ <cpustride> ]
 *		Run a combined read/update performance test with the specified
 *		number of readers and one updater and specified CPU spacing.
 *		The readers run on the low-numbered CPUs and the updater
 *		on the highest-numbered CPU.
 *
 * The above tests produce output as follows:
 *
 *	n_reads: 46008000  n_updates: 146026  nreaders: 2  nupdaters: 1 duration: 1
 *	ns/read: 43.4707  ns/update: 6848.1
 *
 * The first line lists the total number of RCU reads and updates executed
 * during the test, the number of reader threads, the number of updater
 * threads, and the duration of the test in seconds.  The second line
 * lists the average duration of each type of operation in nanoseconds,
 * or "nan" if the corresponding type of operation was not performed.
 *
 *	./rcu <nreaders> stress
 *		Run a stress test with the specified number of readers and
 *		one updater.  None of the threads are affinitized to any
 *		specific CPU.
 *
 * This test produces output as follows:
 *
 *	n_reads: 114633217  n_updates: 3903415  n_mberror: 0
 *	rcu_stress_count: 114618391 14826 0 0 0 0 0 0 0 0 0
 *
 * The first line lists the number of RCU read and update operations
 * executed, followed by the number of memory-ordering violations
 * (which will be zero in a correct RCU implementation).  The second
 * line lists the number of readers observing progressively more stale
 * data.  A correct RCU implementation will have all but the first two
 * numbers zero.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Copyright (c) 2008 Paul E. McKenney, IBM Corporation.
 */
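/*
 * Per-thread counters of completed read-side and update-side operations,
 * summed into the global totals at the end of each test run.
 */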
DEFINE_PER_THREAD(long long, n_reads_pt);
DEFINE_PER_THREAD(long long, n_updates_pt);
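/*
 * Which call_rcu() worker configuration to exercise: the default
 * per-process worker thread, one worker per CPU, or a worker created
 * by each updater thread.
 */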
enum callrcu_type {
	CALLRCU_GLOBAL,
	CALLRCU_PERCPU,
	CALLRCU_PERTHREAD,
};

static enum callrcu_type callrcu_type = CALLRCU_GLOBAL;
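/* Operation totals reported at the end of each test. */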
long long n_reads = 0LL;
long long n_updates = 0LL;
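/*
 * Test phase flag: threads spin while it is GOFLAG_INIT, run while it is
 * GOFLAG_RUN, and stop when it becomes GOFLAG_STOP.  The declaration is
 * cache-line aligned to avoid false sharing with neighboring globals.
 */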
#define GOFLAG_INIT 0
#define GOFLAG_RUN  1
#define GOFLAG_STOP 2

volatile int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
		= GOFLAG_INIT;
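/* Read-side critical sections executed per pass of the reader loop. */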
#define RCU_READ_RUN 1000
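/*
 * When RCU_READ_NESTABLE is defined, rcu_read_lock_nest() and
 * rcu_read_unlock_nest() map to real nested read-side critical sections;
 * otherwise they compile to nothing.
 */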
#define RCU_READ_NESTABLE

#ifdef RCU_READ_NESTABLE
#define rcu_read_lock_nest() rcu_read_lock()
#define rcu_read_unlock_nest() rcu_read_unlock()
#else /* #ifdef RCU_READ_NESTABLE */
#define rcu_read_lock_nest()
#define rcu_read_unlock_nest()
#endif /* #else #ifdef RCU_READ_NESTABLE */
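/*
 * Map the torture-test hooks onto the QSBR flavor's quiescent-state and
 * thread offline/online primitives.  The #ifndef fallbacks below turn
 * any hook the flavor does not provide into a no-op.
 */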
#define mark_rcu_quiescent_state	rcu_quiescent_state
#define put_thread_offline		rcu_thread_offline
#define put_thread_online		rcu_thread_online
#ifndef mark_rcu_quiescent_state
#define mark_rcu_quiescent_state() do {} while (0)
#endif /* #ifndef mark_rcu_quiescent_state */

#ifndef put_thread_offline
#define put_thread_offline() do {} while (0)
#define put_thread_online() do {} while (0)
#define put_thread_online_delay() do {} while (0)
#else /* #ifndef put_thread_offline */
#define put_thread_online_delay() synchronize_rcu()
#endif /* #else #ifndef put_thread_offline */
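/*
 * Reader thread for the performance tests: runs batches of RCU_READ_RUN
 * empty read-side critical sections, announcing a quiescent state after
 * each batch and staying "offline" while waiting for the test to start.
 */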
void *rcu_read_perf_test(void *arg)
{
	int i;
	int me = (long)arg;
	long long n_reads_local = 0;

	rcu_register_thread();
	run_on(me);
	uatomic_inc(&nthreadsrunning);
	put_thread_offline();
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	put_thread_online();
	while (goflag == GOFLAG_RUN) {
		for (i = 0; i < RCU_READ_RUN; i++) {
			rcu_read_lock();
			/* rcu_read_lock_nest(); */
			/* rcu_read_unlock_nest(); */
			rcu_read_unlock();
		}
		n_reads_local += RCU_READ_RUN;
		mark_rcu_quiescent_state();
	}
	__get_thread_var(n_reads_pt) += n_reads_local;
	put_thread_offline();
	rcu_unregister_thread();

	return NULL;
}
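/*
 * Updater thread for the performance tests: loops on synchronize_rcu()
 * and counts completed grace periods.  With CALLRCU_PERTHREAD, it also
 * sets up and tears down its own call_rcu() worker.
 */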
void *rcu_update_perf_test(void *arg __attribute__((unused)))
{
	long long n_updates_local = 0;

	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		crdp = create_call_rcu_data(0, -1);
		if (crdp != NULL) {
			diag("Successfully using per-thread call_rcu() worker.");
			set_thread_call_rcu_data(crdp);
		}
	}
	uatomic_inc(&nthreadsrunning);
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		synchronize_rcu();
		n_updates_local++;
	}
	__get_thread_var(n_updates_pt) += n_updates_local;
	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		crdp = get_thread_call_rcu_data();
		set_thread_call_rcu_data(NULL);
		call_rcu_data_free(crdp);
	}
	return NULL;
}
void perftestinit(void)
{
	init_per_thread(n_reads_pt, 0LL);
	init_per_thread(n_updates_pt, 0LL);
	uatomic_set(&nthreadsrunning, 0);
}
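/*
 * Wait for all test threads to check in, let them run for the test
 * duration, stop them, then sum the per-thread counters and report the
 * average cost of each operation in nanoseconds.
 */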
int perftestrun(int nthreads, int nreaders, int nupdaters)
{
	int t;
	int duration = 1;

	cmm_smp_mb();
	while (uatomic_read(&nthreadsrunning) < nthreads)
		(void) poll(NULL, 0, 1);
	goflag = GOFLAG_RUN;
	cmm_smp_mb();
	sleep(duration);
	cmm_smp_mb();
	goflag = GOFLAG_STOP;
	cmm_smp_mb();
	wait_all_threads();
	for_each_thread(t) {
		n_reads += per_thread(n_reads_pt, t);
		n_updates += per_thread(n_updates_pt, t);
	}
	diag("n_reads: %lld  n_updates: %lld  nreaders: %d  nupdaters: %d duration: %d",
	     n_reads, n_updates, nreaders, nupdaters, duration);
	diag("ns/read: %g  ns/update: %g",
	     ((duration * 1000*1000*1000. * (double)nreaders) /
	      (double)n_reads),
	     ((duration * 1000*1000*1000. * (double)nupdaters) /
	      (double)n_updates));
	if (get_cpu_call_rcu_data(0)) {
		diag("Deallocating per-CPU call_rcu threads.");
		free_all_cpu_call_rcu_data();
	}
	return 0;
}
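/*
 * Combined performance test: nreaders reader threads pinned to CPUs
 * spaced by cpustride, plus a single updater thread.
 */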
int perftest(int nreaders, int cpustride)
{
	int i;
	long arg;

	perftestinit();
	for (i = 0; i < nreaders; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_read_perf_test, (void *)arg);
	}
	arg = (long)(i * cpustride);
	create_thread(rcu_update_perf_test, (void *)arg);
	return perftestrun(i + 1, nreaders, 1);
}
int rperftest(int nreaders, int cpustride)
{
	int i;
	long arg;

	perftestinit();
	init_per_thread(n_reads_pt, 0LL);
	for (i = 0; i < nreaders; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_read_perf_test, (void *)arg);
	}
	return perftestrun(i, nreaders, 0);
}
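/* Update-side-only performance test: updaters but no readers. */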
int uperftest(int nupdaters, int cpustride)
{
	int i;
	long arg;

	perftestinit();
	init_per_thread(n_reads_pt, 0LL);
	for (i = 0; i < nupdaters; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_update_perf_test, (void *)arg);
	}
	return perftestrun(i, 0, nupdaters);
}
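/*
 * Stress test: the updater cycles rcu_stress_current around a small
 * array of elements.  Each element's pipe_count records how many updates
 * ago it was current, so a reader dereferencing rcu_stress_current can
 * classify how stale the data it observed was.  The mbtest flag is
 * cleared before an element is recycled, letting readers detect
 * memory-ordering violations.
 */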
#define RCU_STRESS_PIPE_LEN 10

struct rcu_stress {
	int pipe_count;
	int mbtest;
};

struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0, 0 } };
struct rcu_stress *rcu_stress_current;
int rcu_stress_idx = 0;

int n_mberror = 0;
int garbage = 0;
DEFINE_PER_THREAD(long long [RCU_STRESS_PIPE_LEN + 1], rcu_stress_count);
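/*
 * Reader thread for the stress test: samples rcu_stress_current inside a
 * read-side critical section, checks mbtest, and histograms the observed
 * pipe_count staleness into rcu_stress_count.
 */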
void *rcu_read_stress_test(void *arg __attribute__((unused)))
{
	int i;
	int itercnt = 0;
	struct rcu_stress *p;
	int pc;

	rcu_register_thread();
	put_thread_offline();
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	put_thread_online();
	while (goflag == GOFLAG_RUN) {
		rcu_read_lock();
		p = rcu_dereference(rcu_stress_current);
		if (p->mbtest == 0)
			n_mberror++;
		rcu_read_lock_nest();
		for (i = 0; i < 100; i++)
			garbage++;	/* short delay inside the critical section */
		rcu_read_unlock_nest();
		pc = p->pipe_count;
		rcu_read_unlock();
		if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0))
			pc = RCU_STRESS_PIPE_LEN;
		__get_thread_var(rcu_stress_count)[pc]++;
		__get_thread_var(n_reads_pt)++;
		mark_rcu_quiescent_state();
		if ((++itercnt % 0x1000) == 0) {
			put_thread_offline();
			put_thread_online_delay();
			put_thread_online();
		}
	}
	put_thread_offline();
	rcu_unregister_thread();

	return NULL;
}
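/*
 * Mutex/condition-variable pair used by the updater to wait until its
 * call_rcu() callback has run.
 */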
static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;
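/* call_rcu() callback: wake up the updater waiting on the condition. */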
void rcu_update_stress_test_rcu(struct rcu_head *head __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_lock(&call_rcu_test_mutex);
	if (ret) {
		errno = ret;
		diag("pthread_mutex_lock: %s", strerror(errno));
		abort();
	}
	ret = pthread_cond_signal(&call_rcu_test_cond);
	if (ret) {
		errno = ret;
		diag("pthread_cond_signal: %s", strerror(errno));
		abort();
	}
	ret = pthread_mutex_unlock(&call_rcu_test_mutex);
	if (ret) {
		errno = ret;
		diag("pthread_mutex_unlock: %s", strerror(errno));
		abort();
	}
}
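/*
 * Updater thread for the stress test: installs a fresh element as
 * rcu_stress_current, ages every other element's pipe_count, then waits
 * for a grace period, alternating between synchronize_rcu() and
 * call_rcu() followed by a condition-variable wait for the callback.
 */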
void *rcu_update_stress_test(void *arg __attribute__((unused)))
{
	int i;
	int ret;
	struct rcu_stress *p;
	struct rcu_head rh;

	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		/* Prepare the next element of the "pipe" array. */
		i = rcu_stress_idx + 1;
		if (i >= RCU_STRESS_PIPE_LEN)
			i = 0;
		p = &rcu_stress_array[i];
		p->mbtest = 0;
		cmm_smp_mb();
		p->pipe_count = 0;
		p->mbtest = 1;
		rcu_assign_pointer(rcu_stress_current, p);
		rcu_stress_idx = i;
		/* Age every element that is no longer current. */
		for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
			if (i != rcu_stress_idx)
				rcu_stress_array[i].pipe_count++;
		/* Alternate between synchronize_rcu() and call_rcu(). */
		if (n_updates & 0x1)
			synchronize_rcu();
		else {
			ret = pthread_mutex_lock(&call_rcu_test_mutex);
			if (ret) {
				errno = ret;
				diag("pthread_mutex_lock: %s", strerror(errno));
				abort();
			}
			rcu_register_thread();
			call_rcu(&rh, rcu_update_stress_test_rcu);
			rcu_unregister_thread();
			/*
			 * Our MacOS X test machine with the following
			 * config:
			 * 15.6.0 Darwin Kernel Version 15.6.0
			 * root:xnu-3248.60.10~1/RELEASE_X86_64
			 * appears to have issues with the liburcu-signal
			 * signal being delivered on top of
			 * pthread_cond_wait. It seems to make the
			 * thread continue, and therefore corrupt the
			 * rcu_head. Work around this issue by
			 * unregistering the RCU read-side thread
			 * immediately after call_rcu (call_rcu needs
			 * us to be registered RCU readers).
			 */
			ret = pthread_cond_wait(&call_rcu_test_cond,
					&call_rcu_test_mutex);
			if (ret) {
				errno = ret;
				diag("pthread_cond_wait: %s", strerror(errno));
				abort();
			}
			ret = pthread_mutex_unlock(&call_rcu_test_mutex);
			if (ret) {
				errno = ret;
				diag("pthread_mutex_unlock: %s", strerror(errno));
				abort();
			}
		}
		n_updates++;
	}

	return NULL;
}
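/*
 * Additional updater-like threads that issue grace periods without
 * touching the stress data, to add load on the grace-period machinery.
 */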
void *rcu_fake_update_stress_test(void *arg __attribute__((unused)))
{
	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		crdp = create_call_rcu_data(0, -1);
		if (crdp != NULL) {
			diag("Successfully using per-thread call_rcu() worker.");
			set_thread_call_rcu_data(crdp);
		}
	}
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		synchronize_rcu();
		(void) poll(NULL, 0, 1);
	}
	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		crdp = get_thread_call_rcu_data();
		set_thread_call_rcu_data(NULL);
		call_rcu_data_free(crdp);
	}
	return NULL;
}
int stresstest(int nreaders)
{
	int i;
	int t;
	long long *p;
	long long sum;

	init_per_thread(n_reads_pt, 0LL);
	for_each_thread(t) {
		p = &per_thread(rcu_stress_count, t)[0];
		for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++)
			p[i] = 0LL;
	}
	rcu_stress_current = &rcu_stress_array[0];
	rcu_stress_current->pipe_count = 0;
	rcu_stress_current->mbtest = 1;
	for (i = 0; i < nreaders; i++)
		create_thread(rcu_read_stress_test, NULL);
	create_thread(rcu_update_stress_test, NULL);
	for (i = 0; i < 5; i++)
		create_thread(rcu_fake_update_stress_test, NULL);
	cmm_smp_mb();
	goflag = GOFLAG_RUN;
	cmm_smp_mb();
	sleep(10);
	cmm_smp_mb();
	goflag = GOFLAG_STOP;
	cmm_smp_mb();
	wait_all_threads();
	for_each_thread(t)
		n_reads += per_thread(n_reads_pt, t);
	diag("n_reads: %lld  n_updates: %lld  n_mberror: %d",
	     n_reads, n_updates, n_mberror);
	rdiag("rcu_stress_count:");
	for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
		sum = 0LL;
		for_each_thread(t) {
			sum += per_thread(rcu_stress_count, t)[i];
		}
		rdiag(" %lld", sum);
	}
	if (get_cpu_call_rcu_data(0)) {
		diag("Deallocating per-CPU call_rcu threads.");
		free_all_cpu_call_rcu_data();
	}
	/* Report failure if any reader saw a memory-ordering violation. */
	return n_mberror ? -1 : 0;
}
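/* Print usage information and exit. */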
void usage(char *argv[]) __attribute__((noreturn));

void usage(char *argv[])
{
	diag("Usage: %s nreaders [ perf | rperf | uperf | stress ] [ stride ] [ callrcu_global | callrcu_percpu | callrcu_perthread ]", argv[0]);
	exit(-1);
}
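/*
 * Parse the command line, select the call_rcu() worker configuration,
 * and run the requested performance or stress test.
 */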
int main(int argc, char *argv[])
{
	int nreaders = 1;
	int cpustride = 1;

	plan_tests(NR_TESTS);

	if (argc > 4) {
		const char *callrcu_str = argv[4];

		if (strcmp(callrcu_str, "callrcu_global") == 0) {
			callrcu_type = CALLRCU_GLOBAL;
		} else if (strcmp(callrcu_str, "callrcu_percpu") == 0) {
			callrcu_type = CALLRCU_PERCPU;
		} else if (strcmp(callrcu_str, "callrcu_perthread") == 0) {
			callrcu_type = CALLRCU_PERTHREAD;
		} else {
			usage(argv);
		}
	}

	switch (callrcu_type) {
	case CALLRCU_GLOBAL:
		diag("Using global per-process call_rcu thread.");
		break;
	case CALLRCU_PERCPU:
		diag("Using per-CPU call_rcu threads.");
		if (create_all_cpu_call_rcu_data(0))
			diag("create_all_cpu_call_rcu_data: %s",
			     strerror(errno));
		break;
	case CALLRCU_PERTHREAD:
		diag("Using per-thread call_rcu() worker.");
		break;
	}

#ifdef DEBUG_YIELD
	yield_active |= YIELD_READ;
	yield_active |= YIELD_WRITE;
#endif

	if (argc < 2)
		usage(argv);
	if (strcmp(argv[1], "-h") == 0
	    || strcmp(argv[1], "--help") == 0) {
		usage(argv);
	}
	nreaders = strtoul(argv[1], NULL, 0);
	if (argc == 2) {
		ok(!perftest(nreaders, cpustride),
		   "perftest readers: %d, stride: %d",
		   nreaders, cpustride);
		return exit_status();
	}
	if (argc > 3)
		cpustride = strtoul(argv[3], NULL, 0);
	if (strcmp(argv[2], "perf") == 0)
		ok(!perftest(nreaders, cpustride),
		   "perftest readers: %d, stride: %d",
		   nreaders, cpustride);
	else if (strcmp(argv[2], "rperf") == 0)
		ok(!rperftest(nreaders, cpustride),
		   "rperftest readers: %d, stride: %d",
		   nreaders, cpustride);
	else if (strcmp(argv[2], "uperf") == 0)
		ok(!uperftest(nreaders, cpustride),
		   "uperftest updaters: %d, stride: %d",
		   nreaders, cpustride);
	else if (strcmp(argv[2], "stress") == 0)
		ok(!stresstest(nreaders),
		   "stresstest readers: %d, stride: %d",
		   nreaders, cpustride);
	else
		usage(argv);

	return exit_status();
}