/*
 * rcutorture.h: simple user-level performance/stress test of RCU.
 *
 * Usage:
 *	./rcu <nreaders> rperf [ <cpustride> ]
 *		Run a read-side performance test with the specified
 *		number of readers spaced by <cpustride>.
 *		Thus "./rcu 16 rperf 2" would run 16 readers on even-numbered
 *		CPUs from 0 to 30.
 *	./rcu <nupdaters> uperf [ <cpustride> ]
 *		Run an update-side performance test with the specified
 *		number of updaters and specified CPU spacing.
 *	./rcu <nreaders> perf [ <cpustride> ]
 *		Run a combined read/update performance test with the specified
 *		number of readers and one updater and specified CPU spacing.
 *		The readers run on the low-numbered CPUs and the updater
 *		on the highest-numbered CPU.
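 *		For example, "./rcu 4 perf 1" would run four readers on
 *		CPUs 0-3 and the updater on CPU 4 (see perftest() below).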
 *
 * The above tests produce output as follows:
 *
 * n_reads: 46008000 n_updates: 146026 nreaders: 2 nupdaters: 1 duration: 1
 * ns/read: 43.4707 ns/update: 6848.1
 *
 * The first line lists the total number of RCU reads and updates executed
 * during the test, the number of reader threads, the number of updater
 * threads, and the duration of the test in seconds.  The second line
 * lists the average duration of each type of operation in nanoseconds,
 * or "nan" if the corresponding type of operation was not performed.
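 *
 * (The per-operation figures follow directly from the totals above; the
 * reporting code in perftestrun() below computes them as:
 *
 *	ns/read   = duration * 10^9 * nreaders  / n_reads
 *	ns/update = duration * 10^9 * nupdaters / n_updates
 *
 * so the sample lines work out to 2e9/46008000 = 43.47 ns/read and
 * 1e9/146026 = 6848.1 ns/update.)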
 *
 *	./rcu <nreaders> stress
 *		Run a stress test with the specified number of readers and
 *		one updater.  None of the threads are affinitied to any
 *		specific CPU.
 *
 * This test produces output as follows:
 *
 * n_reads: 114633217 n_updates: 3903415 n_mberror: 0
 * rcu_stress_count: 114618391 14826 0 0 0 0 0 0 0 0 0
 *
 * The first line lists the number of RCU read and update operations
 * executed, followed by the number of memory-ordering violations
 * (which will be zero in a correct RCU implementation).  The second
 * line lists the number of readers observing progressively more stale
 * data.  A correct RCU implementation will have all but the first two
 * numbers be zero.
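 *
 * (Reading the histogram: column i counts reads that found a block whose
 * pipe_count was i, that is, data that had been superseded by i subsequent
 * updates.  The final column also absorbs any out-of-range values, since
 * the reader code below clamps pipe_count to RCU_STRESS_PIPE_LEN.)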
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2008 Paul E. McKenney, IBM Corporation.
 */

DEFINE_PER_THREAD(long long, n_reads_pt);
DEFINE_PER_THREAD(long long, n_updates_pt);

long long n_reads = 0LL;
long n_updates = 0L;
int nthreadsrunning;

#define GOFLAG_INIT 0
#define GOFLAG_RUN  1
#define GOFLAG_STOP 2

volatile int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
		= GOFLAG_INIT;

#define RCU_READ_RUN 1000

#define RCU_READ_NESTABLE

#ifdef RCU_READ_NESTABLE
#define rcu_read_lock_nest() rcu_read_lock()
#define rcu_read_unlock_nest() rcu_read_unlock()
#else /* #ifdef RCU_READ_NESTABLE */
#define rcu_read_lock_nest()
#define rcu_read_unlock_nest()
#endif /* #else #ifdef RCU_READ_NESTABLE */
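
/*
 * With RCU_READ_NESTABLE defined, the _nest() variants expand to real
 * rcu_read_lock()/rcu_read_unlock() calls, so the readers below exercise
 * nested read-side critical sections; with it undefined they compile away.
 * Illustrative shape of one reader pass (a sketch, not part of the test):
 *
 *	rcu_read_lock();
 *	rcu_read_lock_nest();
 *	...			(access RCU-protected data)
 *	rcu_read_unlock_nest();
 *	rcu_read_unlock();
 */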

#define mark_rcu_quiescent_state rcu_quiescent_state
#define put_thread_offline rcu_thread_offline
#define put_thread_online rcu_thread_online

#ifndef mark_rcu_quiescent_state
#define mark_rcu_quiescent_state() do ; while (0)
#endif /* #ifdef mark_rcu_quiescent_state */

#ifndef put_thread_offline
#define put_thread_offline() do ; while (0)
#define put_thread_online() do ; while (0)
#define put_thread_online_delay() do ; while (0)
#else /* #ifndef put_thread_offline */
#define put_thread_online_delay() synchronize_rcu()
#endif /* #else #ifndef put_thread_offline */
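
/*
 * Performance test.
 */

/*
 * rcu_read_perf_test: reader thread for the performance tests.  Registers
 * itself with RCU, runs on the CPU encoded in arg, then performs empty
 * RCU read-side critical sections in batches of RCU_READ_RUN until goflag
 * tells it to stop, accumulating the total into n_reads_pt.
 */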
void *rcu_read_perf_test(void *arg)
{
	struct call_rcu_data *crdp;
	int i;
	int me = (long)arg;
	long long n_reads_local = 0;

	rcu_register_thread();
	run_on(me);
	uatomic_inc(&nthreadsrunning);
	put_thread_offline();
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	put_thread_online();
	while (goflag == GOFLAG_RUN) {
		for (i = 0; i < RCU_READ_RUN; i++) {
			rcu_read_lock();
			/* rcu_read_lock_nest(); */
			/* rcu_read_unlock_nest(); */
			rcu_read_unlock();
		}
		n_reads_local += RCU_READ_RUN;
		mark_rcu_quiescent_state();
	}
	__get_thread_var(n_reads_pt) += n_reads_local;
	put_thread_offline();
	crdp = get_thread_call_rcu_data();
	set_thread_call_rcu_data(NULL);
	call_rcu_data_free(crdp);
	rcu_unregister_thread();

	return (NULL);
}
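
/*
 * rcu_update_perf_test: updater thread for the performance tests.  With
 * probability 1/16 it creates and installs a private call_rcu() worker,
 * then repeatedly waits for grace periods, accumulating the count into
 * n_updates_pt.
 */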
void *rcu_update_perf_test(void *arg)
{
	long long n_updates_local = 0;

	if ((random() & 0xf00) == 0) {
		struct call_rcu_data *crdp;

		crdp = create_call_rcu_data(0, -1);
		if (crdp != NULL) {
			fprintf(stderr,
				"Using per-thread call_rcu() worker.\n");
			set_thread_call_rcu_data(crdp);
		}
	}
	uatomic_inc(&nthreadsrunning);
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		synchronize_rcu();
		n_updates_local++;
	}
	__get_thread_var(n_updates_pt) += n_updates_local;
	return NULL;
}
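
/*
 * perftestinit/perftestrun: common scaffolding for the performance tests.
 * perftestinit() zeroes the per-thread counters; perftestrun() waits for
 * all test threads to check in, lets them run for the measurement interval,
 * stops them, sums the per-thread counters, and prints the two report
 * lines described in the header comment.
 */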
void perftestinit(void)
{
	init_per_thread(n_reads_pt, 0LL);
	init_per_thread(n_updates_pt, 0LL);
	uatomic_set(&nthreadsrunning, 0);
}

void perftestrun(int nthreads, int nreaders, int nupdaters)
{
	int t;
	int duration = 1;

	cmm_smp_mb();
	/* Wait for all test threads to check in, then let them run. */
	while (uatomic_read(&nthreadsrunning) < nthreads)
		(void) poll(NULL, 0, 1);
	goflag = GOFLAG_RUN;
	cmm_smp_mb();
	sleep(duration);
	cmm_smp_mb();
	goflag = GOFLAG_STOP;
	cmm_smp_mb();
	wait_all_threads();
	for_each_thread(t) {
		n_reads += per_thread(n_reads_pt, t);
		n_updates += per_thread(n_updates_pt, t);
	}
	printf("n_reads: %lld n_updates: %ld nreaders: %d nupdaters: %d duration: %d\n",
	       n_reads, n_updates, nreaders, nupdaters, duration);
	printf("ns/read: %g ns/update: %g\n",
	       ((duration * 1000*1000*1000.*(double)nreaders) /
		(double)n_reads),
	       ((duration * 1000*1000*1000.*(double)nupdaters) /
		(double)n_updates));
	if (get_cpu_call_rcu_data(0)) {
		fprintf(stderr, "Deallocating per-CPU call_rcu threads.\n");
		free_all_cpu_call_rcu_data();
	}
	exit(0);
}
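
/*
 * perftest/rperftest/uperftest: entry points for the combined, read-only,
 * and update-only performance tests.  Each spawns its threads with CPU
 * numbers spaced by cpustride and then hands control to perftestrun(),
 * which prints the results and exits.
 */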
void perftest(int nreaders, int cpustride)
{
	int i;
	long arg;

	perftestinit();
	for (i = 0; i < nreaders; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_read_perf_test, (void *)arg);
	}
	arg = (long)(i * cpustride);
	create_thread(rcu_update_perf_test, (void *)arg);
	perftestrun(i + 1, nreaders, 1);
}

void rperftest(int nreaders, int cpustride)
{
	int i;
	long arg;

	perftestinit();
	init_per_thread(n_reads_pt, 0LL);
	for (i = 0; i < nreaders; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_read_perf_test, (void *)arg);
	}
	perftestrun(i, nreaders, 0);
}

void uperftest(int nupdaters, int cpustride)
{
	int i;
	long arg;

	perftestinit();
	init_per_thread(n_reads_pt, 0LL);
	for (i = 0; i < nupdaters; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_update_perf_test, (void *)arg);
	}
	perftestrun(i, 0, nupdaters);
}
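
/*
 * Stress test.
 *
 * The updater cycles rcu_stress_current around rcu_stress_array, and every
 * element other than the current one has its pipe_count incremented on each
 * update.  A reader therefore learns how many grace periods have elapsed
 * since the element it is looking at was current, which is what the
 * rcu_stress_count histogram reports.
 */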
#define RCU_STRESS_PIPE_LEN 10

struct rcu_stress {
	int pipe_count;
	int mbtest;
};

struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0 } };
struct rcu_stress *rcu_stress_current;
int rcu_stress_idx = 0;

int n_mberror = 0;
int garbage = 0;
DEFINE_PER_THREAD(long long [RCU_STRESS_PIPE_LEN + 1], rcu_stress_count);
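
/*
 * rcu_read_stress_test: reader thread for the stress test.  Under RCU
 * read-side protection it samples rcu_stress_current, checks the mbtest
 * flag for memory-ordering violations, and records the observed pipe_count
 * in its per-thread rcu_stress_count histogram.  Every 0x1000 iterations it
 * briefly goes offline to exercise that code path as well.
 */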
void *rcu_read_stress_test(void *arg)
{
	int i;
	int itercnt = 0;
	struct rcu_stress *p;
	int pc;

	rcu_register_thread();
	put_thread_offline();
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	put_thread_online();
	while (goflag == GOFLAG_RUN) {
		rcu_read_lock();
		p = rcu_dereference(rcu_stress_current);
		if (p->mbtest == 0)
			n_mberror++;
		rcu_read_lock_nest();
		for (i = 0; i < 100; i++)
			garbage++;
		rcu_read_unlock_nest();
		pc = p->pipe_count;
		rcu_read_unlock();
		if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0))
			pc = RCU_STRESS_PIPE_LEN;
		__get_thread_var(rcu_stress_count)[pc]++;
		__get_thread_var(n_reads_pt)++;
		mark_rcu_quiescent_state();
		if ((++itercnt % 0x1000) == 0) {
			put_thread_offline();
			put_thread_online_delay();
			put_thread_online();
		}
	}
	put_thread_offline();
	rcu_unregister_thread();

	return (NULL);
}
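
/*
 * rcu_update_stress_test_rcu: call_rcu() callback used by the stress-test
 * updater.  It signals call_rcu_test_cond so that the updater, blocked in
 * pthread_cond_wait() below, knows that a grace period has elapsed and its
 * callback has been invoked.
 */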
static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;

void rcu_update_stress_test_rcu(struct rcu_head *head)
{
	if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
		perror("pthread_mutex_lock");
		exit(-1);
	}
	if (pthread_cond_signal(&call_rcu_test_cond) != 0) {
		perror("pthread_cond_signal");
		exit(-1);
	}
	if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
		perror("pthread_mutex_unlock");
		exit(-1);
	}
}
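
/*
 * rcu_update_stress_test: updater thread for the stress test.  On each pass
 * it advances rcu_stress_current to the next array element, resets that
 * element's pipe_count and mbtest, increments pipe_count on every other
 * element, and then waits for a grace period, alternating between
 * synchronize_rcu() and a call_rcu()/condition-variable handshake.
 */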
void *rcu_update_stress_test(void *arg)
{
	int i;
	struct rcu_stress *p;
	struct rcu_head rh;

	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		i = rcu_stress_idx + 1;
		if (i >= RCU_STRESS_PIPE_LEN)
			i = 0;
		p = &rcu_stress_array[i];
		p->mbtest = 0;
		cmm_smp_mb();
		p->pipe_count = 0;
		p->mbtest = 1;
		rcu_assign_pointer(rcu_stress_current, p);
		rcu_stress_idx = i;
		for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
			if (i != rcu_stress_idx)
				rcu_stress_array[i].pipe_count++;
		if (n_updates & 0x1)
			synchronize_rcu();
		else {
			if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
				perror("pthread_mutex_lock");
				exit(-1);
			}
			call_rcu(&rh, rcu_update_stress_test_rcu);
			if (pthread_cond_wait(&call_rcu_test_cond,
					      &call_rcu_test_mutex) != 0) {
				perror("pthread_cond_wait");
				exit(-1);
			}
			if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
				perror("pthread_mutex_unlock");
				exit(-1);
			}
		}
		n_updates++;
	}
	return NULL;
}
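
/*
 * rcu_fake_update_stress_test: additional updater threads that do not touch
 * the stress pipeline at all; they exist only to generate extra grace-period
 * traffic (and, with probability 1/16, a private call_rcu() worker) while
 * the real readers and updater run.
 */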
void *rcu_fake_update_stress_test(void *arg)
{
	if ((random() & 0xf00) == 0) {
		struct call_rcu_data *crdp;

		crdp = create_call_rcu_data(0, -1);
		if (crdp != NULL) {
			fprintf(stderr,
				"Using per-thread call_rcu() worker.\n");
			set_thread_call_rcu_data(crdp);
		}
	}
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		synchronize_rcu();
		(void) poll(NULL, 0, 1);
	}
	return NULL;
}
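
/*
 * stresstest: top-level driver for the stress test.  It zeroes the
 * per-thread histograms, publishes the first pipeline element, starts
 * nreaders reader threads, one real updater, and five fake updaters,
 * lets them run, and then prints the n_reads/n_updates/n_mberror line
 * and the rcu_stress_count histogram described in the header comment.
 */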
void stresstest(int nreaders)
{
	int i;
	int t;
	long long *p;
	long long sum;

	init_per_thread(n_reads_pt, 0LL);
	for_each_thread(t) {
		p = &per_thread(rcu_stress_count, t)[0];
		for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++)
			p[i] = 0LL;
	}
	rcu_stress_current = &rcu_stress_array[0];
	rcu_stress_current->pipe_count = 0;
	rcu_stress_current->mbtest = 1;
	for (i = 0; i < nreaders; i++)
		create_thread(rcu_read_stress_test, NULL);
	create_thread(rcu_update_stress_test, NULL);
	for (i = 0; i < 5; i++)
		create_thread(rcu_fake_update_stress_test, NULL);
	cmm_smp_mb();
	goflag = GOFLAG_RUN;
	cmm_smp_mb();
	sleep(10);
	cmm_smp_mb();
	goflag = GOFLAG_STOP;
	cmm_smp_mb();
	wait_all_threads();
	for_each_thread(t)
		n_reads += per_thread(n_reads_pt, t);
	printf("n_reads: %lld n_updates: %ld n_mberror: %d\n",
	       n_reads, n_updates, n_mberror);
	printf("rcu_stress_count:");
	for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
		sum = 0LL;
		for_each_thread(t) {
			sum += per_thread(rcu_stress_count, t)[i];
		}
		printf(" %lld", sum);
	}
	printf("\n");
	if (get_cpu_call_rcu_data(0)) {
		fprintf(stderr, "Deallocating per-CPU call_rcu threads.\n");
		free_all_cpu_call_rcu_data();
	}
	exit(0);
}
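
/*
 * Mainprogram.  usage() prints the command synopsis; main() optionally
 * allocates per-CPU call_rcu worker threads, parses
 * "<nreaders> [ perf | rperf | uperf | stress ] [ <cpustride> ]", and
 * dispatches to the selected test, defaulting to perftest().
 */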
void usage(int argc, char *argv[])
{
	fprintf(stderr, "Usage: %s [nreaders [ perf | stress ] ]\n", argv[0]);
	exit(-1);
}

int main(int argc, char *argv[])
{
	int nreaders = 1;
	int cpustride = 1;

	smp_init();
	srandom(time(NULL));
	if (random() & 0x100) {
		fprintf(stderr, "Allocating per-CPU call_rcu threads.\n");
		if (create_all_cpu_call_rcu_data(0))
			perror("create_all_cpu_call_rcu_data");
	}
#ifdef DEBUG_YIELD
	yield_active |= YIELD_READ;
	yield_active |= YIELD_WRITE;
#endif
	if (argc > 1) {
		nreaders = strtoul(argv[1], NULL, 0);
		if (argc == 2)
			perftest(nreaders, cpustride);
		if (argc > 3)
			cpustride = strtoul(argv[3], NULL, 0);
		if (strcmp(argv[2], "perf") == 0)
			perftest(nreaders, cpustride);
		else if (strcmp(argv[2], "rperf") == 0)
			rperftest(nreaders, cpustride);
		else if (strcmp(argv[2], "uperf") == 0)
			uperftest(nreaders, cpustride);
		else if (strcmp(argv[2], "stress") == 0)
			stresstest(nreaders);
		usage(argc, argv);
	}
	/* Each test exits the program, so this runs only with no arguments. */
	perftest(nreaders, cpustride);
	return 0;
}