/*
 * rcutorture.h: simple user-level performance/stress test of RCU.
 *
 * Usage:
 *	./rcu <nreaders> rperf [ <cpustride> ]
 *		Run a read-side performance test with the specified
 *		number of readers spaced by <cpustride>.
 *		Thus "./rcu 16 rperf 2" would run 16 readers on even-numbered
 *		CPUs from 0 to 30.
 *	./rcu <nupdaters> uperf [ <cpustride> ]
 *		Run an update-side performance test with the specified
 *		number of updaters and specified CPU spacing.
 *	./rcu <nreaders> perf [ <cpustride> ]
 *		Run a combined read/update performance test with the specified
 *		number of readers and one updater and specified CPU spacing.
 *		The readers run on the low-numbered CPUs and the updater
 *		on the highest-numbered CPU.
 *
 * The above tests produce output as follows:
 *
 * n_reads: 46008000 n_updates: 146026 nreaders: 2 nupdaters: 1 duration: 1
 * ns/read: 43.4707 ns/update: 6848.1
 *
 * The first line lists the total number of RCU reads and updates executed
 * during the test, the number of reader threads, the number of updater
 * threads, and the duration of the test in seconds.  The second line
 * lists the average duration of each type of operation in nanoseconds,
 * or "nan" if the corresponding type of operation was not performed.
 *
 *	./rcu <nreaders> stress
 *		Run a stress test with the specified number of readers and
 *		one updater.  None of the threads are affinitized to any
 *		specific CPU.
 *
 * This test produces output as follows:
 *
 * n_reads: 114633217 n_updates: 3903415 n_mberror: 0
 * rcu_stress_count: 114618391 14826 0 0 0 0 0 0 0 0 0
 *
 * The first line lists the number of RCU read and update operations
 * executed, followed by the number of memory-ordering violations
 * (which will be zero in a correct RCU implementation).  The second
 * line lists the number of readers observing progressively more stale
 * data.  A correct RCU implementation will have all but the first two
 * counts being zero.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2008 Paul E. McKenney, IBM Corporation.
 */
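
/*
 * Per-thread operation counters, their global totals, and the goflag
 * variable used to start and stop the test threads.
 */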
DEFINE_PER_THREAD(long long, n_reads_pt);
DEFINE_PER_THREAD(long long, n_updates_pt);

long long n_reads = 0LL;
volatile int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))) = GOFLAG_INIT;
#define RCU_READ_RUN 1000

#define RCU_READ_NESTABLE

#ifdef RCU_READ_NESTABLE
#define rcu_read_lock_nest() rcu_read_lock()
#define rcu_read_unlock_nest() rcu_read_unlock()
#else /* #ifdef RCU_READ_NESTABLE */
#define rcu_read_lock_nest()
#define rcu_read_unlock_nest()
#endif /* #else #ifdef RCU_READ_NESTABLE */
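
/*
 * Hooks into the RCU flavor's quiescent-state and thread-offline
 * primitives.  The #ifndef blocks below supply no-op fallbacks when the
 * flavor does not provide them, and define put_thread_online_delay() as
 * synchronize_rcu() when it does.
 */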
#define mark_rcu_quiescent_state rcu_quiescent_state
#define put_thread_offline rcu_thread_offline
#define put_thread_online rcu_thread_online

#ifndef mark_rcu_quiescent_state
#define mark_rcu_quiescent_state() do ; while (0)
#endif /* #ifdef mark_rcu_quiescent_state */

#ifndef put_thread_offline
#define put_thread_offline() do ; while (0)
#define put_thread_online() do ; while (0)
#define put_thread_online_delay() do ; while (0)
#else /* #ifndef put_thread_offline */
#define put_thread_online_delay() synchronize_rcu()
#endif /* #else #ifndef put_thread_offline */
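
/*
 * Reader thread for the performance tests: loops over batches of
 * RCU_READ_RUN read-side passes, accumulating the total in n_reads_pt,
 * until goflag tells it to stop, then releases its call_rcu() worker
 * and unregisters itself.
 */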
void *rcu_read_perf_test(void *arg)
	struct call_rcu_data *crdp;
	long long n_reads_local = 0;

	rcu_register_thread();
	uatomic_inc(&nthreadsrunning);
	while (goflag == GOFLAG_INIT)
	mark_rcu_quiescent_state();
	while (goflag == GOFLAG_RUN) {
		for (i = 0; i < RCU_READ_RUN; i++) {
			/* rcu_read_lock_nest(); */
			/* rcu_read_unlock_nest(); */
		n_reads_local += RCU_READ_RUN;
		mark_rcu_quiescent_state();
	__get_thread_var(n_reads_pt) += n_reads_local;
	put_thread_offline();
	crdp = get_thread_call_rcu_data();
	set_thread_call_rcu_data(NULL);
	call_rcu_data_free(crdp);
	rcu_unregister_thread();
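
/*
 * Updater thread for the performance tests: accumulates its update count
 * in n_updates_pt until goflag tells it to stop.  A randomly selected
 * minority of updaters first install a per-thread call_rcu() worker.
 */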
void *rcu_update_perf_test(void *arg)
	long long n_updates_local = 0;

	if ((random() & 0xf00) == 0) {
		struct call_rcu_data *crdp;

		crdp = create_call_rcu_data(0, -1);
		fprintf(stderr, "Using per-thread call_rcu() worker.\n");
		set_thread_call_rcu_data(crdp);
	uatomic_inc(&nthreadsrunning);
	while (goflag == GOFLAG_INIT)
	while (goflag == GOFLAG_RUN) {
	__get_thread_var(n_updates_pt) += n_updates_local;
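
/* Reset the per-thread counters and the running-thread count before a run. */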
void perftestinit(void)
	init_per_thread(n_reads_pt, 0LL);
	init_per_thread(n_updates_pt, 0LL);
	uatomic_set(&nthreadsrunning, 0);
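
/*
 * Common driver for the performance tests: wait until all nthreads test
 * threads have checked in via nthreadsrunning, let them run for the test
 * duration, set goflag to GOFLAG_STOP, then total the per-thread counters
 * and print the summary lines described in the header comment.
 */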
void perftestrun(int nthreads, int nreaders, int nupdaters)
	while (uatomic_read(&nthreadsrunning) < nthreads)
	goflag = GOFLAG_STOP;
	n_reads += per_thread(n_reads_pt, t);
	n_updates += per_thread(n_updates_pt, t);
	printf("n_reads: %lld n_updates: %lld nreaders: %d nupdaters: %d duration: %d\n",
	       n_reads, n_updates, nreaders, nupdaters, duration);
	printf("ns/read: %g ns/update: %g\n",
	       ((duration * 1000*1000*1000.*(double)nreaders) /
	        (double)n_reads),
	       ((duration * 1000*1000*1000.*(double)nupdaters) /
	        (double)n_updates));
	if (get_cpu_call_rcu_data(0)) {
		fprintf(stderr, "Deallocating per-CPU call_rcu threads.\n");
		free_all_cpu_call_rcu_data();
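
/*
 * Combined "perf" test: nreaders reader threads spaced cpustride CPUs
 * apart plus one updater, driven by perftestrun().
 */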
void perftest(int nreaders, int cpustride)
	for (i = 0; i < nreaders; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_read_perf_test, (void *)arg);
	arg = (long)(i * cpustride);
	create_thread(rcu_update_perf_test, (void *)arg);
	perftestrun(i + 1, nreaders, 1);
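
/* Read-only "rperf" test: nreaders reader threads and no updater. */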
void rperftest(int nreaders, int cpustride)
	init_per_thread(n_reads_pt, 0LL);
	for (i = 0; i < nreaders; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_read_perf_test, (void *)arg);
	perftestrun(i, nreaders, 0);
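
/* Update-only "uperf" test: nupdaters updater threads and no readers. */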
void uperftest(int nupdaters, int cpustride)
	init_per_thread(n_reads_pt, 0LL);
	for (i = 0; i < nupdaters; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_update_perf_test, (void *)arg);
	perftestrun(i, 0, nupdaters);
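
/*
 * Stress test: the updater cycles rcu_stress_current around the entries
 * of rcu_stress_array while readers record, in their per-thread
 * rcu_stress_count histograms, how many updates old the element they
 * observed was.  The definition of struct rcu_stress is not shown here;
 * the code below relies on its pipe_count field (the element's age in
 * updates) and its mbtest flag (used to detect memory-ordering
 * violations, reported as n_mberror).
 */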
#define RCU_STRESS_PIPE_LEN 10

struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0 } };
struct rcu_stress *rcu_stress_current;
int rcu_stress_idx = 0;

DEFINE_PER_THREAD(long long [RCU_STRESS_PIPE_LEN + 1], rcu_stress_count);
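
/*
 * Stress-test reader thread: repeatedly picks up rcu_stress_current,
 * clamps and bins the observed pipe_count into its rcu_stress_count
 * histogram, and periodically takes itself briefly offline.
 */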
void *rcu_read_stress_test(void *arg)
	struct rcu_stress *p;

	rcu_register_thread();
	while (goflag == GOFLAG_INIT)
	mark_rcu_quiescent_state();
	while (goflag == GOFLAG_RUN) {
		p = rcu_dereference(rcu_stress_current);
		rcu_read_lock_nest();
		for (i = 0; i < 100; i++)
		rcu_read_unlock_nest();
		if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0))
			pc = RCU_STRESS_PIPE_LEN;
		__get_thread_var(rcu_stress_count)[pc]++;
		__get_thread_var(n_reads_pt)++;
		mark_rcu_quiescent_state();
		if ((++itercnt % 0x1000) == 0) {
			put_thread_offline();
			put_thread_online_delay();
	put_thread_offline();
	rcu_unregister_thread();
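
/*
 * The stress-test updater waits for a grace period by pairing call_rcu()
 * with this mutex/condvar handshake; rcu_update_stress_test_rcu() is the
 * callback that signals the waiting updater.
 */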
static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;

void rcu_update_stress_test_rcu(struct rcu_head *head)
	if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
		perror("pthread_mutex_lock");
	if (pthread_cond_signal(&call_rcu_test_cond) != 0) {
		perror("pthread_cond_signal");
	if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
		perror("pthread_mutex_unlock");
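
/*
 * Stress-test updater thread: advances to the next rcu_stress_array
 * element, publishes it with rcu_assign_pointer(), increments pipe_count
 * in all the other elements, then waits for a grace period using the
 * call_rcu()/condvar handshake above.
 */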
void *rcu_update_stress_test(void *arg)
	struct rcu_stress *p;

	while (goflag == GOFLAG_INIT)
	while (goflag == GOFLAG_RUN) {
		i = rcu_stress_idx + 1;
		if (i >= RCU_STRESS_PIPE_LEN)
		p = &rcu_stress_array[i];
		rcu_assign_pointer(rcu_stress_current, p);
		for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
			if (i != rcu_stress_idx)
				rcu_stress_array[i].pipe_count++;
		if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
			perror("pthread_mutex_lock");
		call_rcu(&rh, rcu_update_stress_test_rcu);
		if (pthread_cond_wait(&call_rcu_test_cond,
				      &call_rcu_test_mutex) != 0) {
			perror("pthread_cond_wait");
		if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
			perror("pthread_mutex_unlock");
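
/*
 * Background threads started by stresstest() to add update-side load; a
 * randomly selected minority install a per-thread call_rcu() worker.
 */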
void *rcu_fake_update_stress_test(void *arg)
	if ((random() & 0xf00) == 0) {
		struct call_rcu_data *crdp;

		crdp = create_call_rcu_data(0, -1);
		fprintf(stderr, "Using per-thread call_rcu() worker.\n");
		set_thread_call_rcu_data(crdp);
	while (goflag == GOFLAG_INIT)
	while (goflag == GOFLAG_RUN) {
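
/*
 * Top-level "stress" test: zero the counters and histograms, make the
 * first rcu_stress_array element current, start nreaders readers, one
 * updater, and five fake updaters, then stop the run and print
 * n_reads/n_updates/n_mberror and the combined rcu_stress_count
 * histogram.
 */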
void stresstest(int nreaders)
	init_per_thread(n_reads_pt, 0LL);
	p = &per_thread(rcu_stress_count, t)[0];
	for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++)
	rcu_stress_current = &rcu_stress_array[0];
	rcu_stress_current->pipe_count = 0;
	rcu_stress_current->mbtest = 1;
	for (i = 0; i < nreaders; i++)
		create_thread(rcu_read_stress_test, NULL);
	create_thread(rcu_update_stress_test, NULL);
	for (i = 0; i < 5; i++)
		create_thread(rcu_fake_update_stress_test, NULL);
	goflag = GOFLAG_STOP;
	n_reads += per_thread(n_reads_pt, t);
	printf("n_reads: %lld n_updates: %lld n_mberror: %d\n",
	       n_reads, n_updates, n_mberror);
	printf("rcu_stress_count:");
	for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
		sum += per_thread(rcu_stress_count, t)[i];
		printf(" %lld", sum);
	if (get_cpu_call_rcu_data(0)) {
		fprintf(stderr, "Deallocating per-CPU call_rcu threads.\n");
		free_all_cpu_call_rcu_data();
void usage(int argc, char *argv[])
	fprintf(stderr, "Usage: %s [nreaders [ perf | stress ] ]\n", argv[0]);
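
/*
 * Parse the command line, randomly decide whether to pre-create per-CPU
 * call_rcu() worker threads, and dispatch to the selected test
 * (defaulting to the combined perf test).
 */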
int main(int argc, char *argv[])
	if (random() & 0x100) {
		fprintf(stderr, "Allocating per-CPU call_rcu threads.\n");
		if (create_all_cpu_call_rcu_data(0))
			perror("create_all_cpu_call_rcu_data");
	yield_active |= YIELD_READ;
	yield_active |= YIELD_WRITE;
	nreaders = strtoul(argv[1], NULL, 0);
	perftest(nreaders, cpustride);
	cpustride = strtoul(argv[3], NULL, 0);
	if (strcmp(argv[2], "perf") == 0)
		perftest(nreaders, cpustride);
	else if (strcmp(argv[2], "rperf") == 0)
		rperftest(nreaders, cpustride);
	else if (strcmp(argv[2], "uperf") == 0)
		uperftest(nreaders, cpustride);
	else if (strcmp(argv[2], "stress") == 0)
		stresstest(nreaders);
	perftest(nreaders, cpustride);