/*
 * test_urcu.c
 *
 * Userspace RCU library - test program
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
 */

#define _GNU_SOURCE
#include "../config.h"
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <assert.h>
#include <sched.h>
#include <errno.h>

#include <urcu/arch.h>

#ifdef __linux__
#include <syscall.h>
#endif

/* hardcoded number of CPUs */
#define NR_CPUS 16384

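/*
 * gettid() has no glibc wrapper on older systems: use the legacy
 * _syscall0() macro when the kernel headers provide it, otherwise issue
 * the raw syscall(__NR_gettid), and as a last resort fall back to the
 * process id.
 */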
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}
#else
#warning "use pid as tid"
static inline pid_t gettid(void)
{
	return getpid();
}
#endif

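/*
 * Link against the LGPL-exported inline read-side implementation unless
 * this is the dynamic-link test build; in that case debug_yield_read()
 * is stubbed out locally.
 */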
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
#define debug_yield_read()
#endif
#include "urcu-qsbr.h"

struct test_array {
	int a;
};

static volatile int test_go, test_stop;

static unsigned long wdelay;

static struct test_array *test_rcu_pointer;

static unsigned long duration;

/* read-side C.S. duration, in loops */
static unsigned long rduration;

/* write-side C.S. duration, in loops */
static unsigned long wduration;

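/* Busy-wait for roughly 'l' iterations of caa_cpu_relax(). */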
static inline void loop_sleep(unsigned long l)
{
	while (l-- != 0)
		caa_cpu_relax();
}

static int verbose_mode;

#define printf_verbose(fmt, args...)		\
	do {					\
		if (verbose_mode)		\
			printf(fmt, args);	\
	} while (0)

static unsigned int cpu_affinities[NR_CPUS];
static unsigned int next_aff = 0;
static int use_affinity = 0;

pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;

#ifndef HAVE_CPU_SET_T
typedef unsigned long cpu_set_t;
# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
#endif

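/*
 * Pin the calling thread to the next CPU of the -a affinity list.
 * next_aff is shared by all threads, hence the mutex around it.
 */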
static void set_affinity(void)
{
	cpu_set_t mask;
	int cpu;
	int ret;

	if (!use_affinity)
		return;

#if HAVE_SCHED_SETAFFINITY
	ret = pthread_mutex_lock(&affinity_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
	cpu = cpu_affinities[next_aff++];
	ret = pthread_mutex_unlock(&affinity_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif /* HAVE_SCHED_SETAFFINITY */
}

/*
 * returns 0 if test should end.
 */
static int test_duration_write(void)
{
	return !test_stop;
}

static int test_duration_read(void)
{
	return !test_stop;
}

static unsigned long long __thread nr_writes;
static unsigned long long __thread nr_reads;

static unsigned int nr_readers;
static unsigned int nr_writers;

pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;

void rcu_copy_mutex_lock(void)
{
	int ret;
	ret = pthread_mutex_lock(&rcu_copy_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}

void rcu_copy_mutex_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&rcu_copy_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * malloc/free are reusing memory areas too quickly, which does not let us
 * test races appropriately. Use a large circular array for allocations.
 * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
 * both alloc and free, which ensures we never run over our tail.
 */
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
static int array_index;
static struct test_array *test_array;

static struct test_array *test_array_alloc(void)
{
	struct test_array *ret;
	int index;

	index = array_index % ARRAY_SIZE;
	assert(test_array[index].a == ARRAY_POISON ||
		test_array[index].a == 0);
	ret = &test_array[index];
	array_index++;
	if (array_index == ARRAY_SIZE)
		array_index = 0;
	return ret;
}

static void test_array_free(struct test_array *ptr)
{
	if (!ptr)
		return;
	ptr->a = ARRAY_POISON;
}

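/*
 * Reader thread: dereference test_rcu_pointer inside a read-side
 * critical section and check the published value. With the QSBR flavor,
 * every registered reader must announce a quiescent state periodically
 * (here every 1024 reads) for synchronize_rcu() to make progress.
 */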
void *thr_reader(void *_count)
{
	unsigned long long *count = _count;
	struct test_array *local_ptr;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"reader", (unsigned long)pthread_self(),
			(unsigned long)gettid());

	set_affinity();

	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		rcu_read_lock();
		local_ptr = rcu_dereference(test_rcu_pointer);
		debug_yield_read();
		if (local_ptr)
			assert(local_ptr->a == 8);
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
		rcu_read_unlock();
		nr_reads++;
		/* QS each 1024 reads */
		if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
			rcu_quiescent_state();
		if (caa_unlikely(!test_duration_read()))
			break;
	}

	rcu_unregister_thread();

	/* test extra thread registration */
	rcu_register_thread();
	rcu_unregister_thread();

	*count = nr_reads;
	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"reader", (unsigned long)pthread_self(),
			(unsigned long)gettid());
	return ((void*)1);

}

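/*
 * Writer thread: publish a freshly allocated element with
 * rcu_xchg_pointer(), wait for a grace period with synchronize_rcu(),
 * then poison and recycle the old element once no reader can still hold
 * a reference to it.
 */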
void *thr_writer(void *_count)
{
	unsigned long long *count = _count;
	struct test_array *new, *old;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"writer", (unsigned long)pthread_self(),
			(unsigned long)gettid());

	set_affinity();

	while (!test_go)
	{
	}
	cmm_smp_mb();
	for (;;) {
		rcu_copy_mutex_lock();
		new = test_array_alloc();
		new->a = 8;
		old = rcu_xchg_pointer(&test_rcu_pointer, new);
		if (caa_unlikely(wduration))
			loop_sleep(wduration);
		synchronize_rcu();
		/* can be done after unlock */
		if (old)
			old->a = 0;
		test_array_free(old);
		rcu_copy_mutex_unlock();
		nr_writes++;
		if (caa_unlikely(!test_duration_write()))
			break;
		if (caa_unlikely(wdelay))
			loop_sleep(wdelay);
	}

	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"writer", (unsigned long)pthread_self(),
			(unsigned long)gettid());
	*count = nr_writes;
	return ((void*)2);
}

void show_usage(int argc, char **argv)
{
	printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
#ifdef DEBUG_YIELD
	printf(" [-r] [-w] (yield reader and/or writer)");
#endif
	printf(" [-d delay] (writer period (us))");
	printf(" [-c duration] (reader C.S. duration (in loops))");
	printf(" [-e duration] (writer C.S. duration (in loops))");
	printf(" [-v] (verbose output)");
	printf(" [-a cpu#] [-a cpu#]... (affinity)");
	printf("\n");
}

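/*
 * main() parses the command line, spawns the reader and writer threads,
 * lets them run for the requested duration, then joins them and prints
 * the aggregated read/write counts.
 */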
int main(int argc, char **argv)
{
	int err;
	pthread_t *tid_reader, *tid_writer;
	void *tret;
	unsigned long long *count_reader, *count_writer;
	unsigned long long tot_reads = 0, tot_writes = 0;
	int i, a;

	if (argc < 4) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[1], "%u", &nr_readers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[2], "%u", &nr_writers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[3], "%lu", &duration);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	for (i = 4; i < argc; i++) {
		if (argv[i][0] != '-')
			continue;
		switch (argv[i][1]) {
#ifdef DEBUG_YIELD
		case 'r':
			yield_active |= YIELD_READ;
			break;
		case 'w':
			yield_active |= YIELD_WRITE;
			break;
#endif
		case 'a':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			a = atoi(argv[++i]);
			cpu_affinities[next_aff++] = a;
			use_affinity = 1;
			printf_verbose("Adding CPU %d affinity\n", a);
			break;
		case 'c':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			rduration = atol(argv[++i]);
			break;
		case 'd':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wdelay = atol(argv[++i]);
			break;
		case 'e':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wduration = atol(argv[++i]);
			break;
		case 'v':
			verbose_mode = 1;
			break;
		}
	}

	printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
		duration, nr_readers, nr_writers);
	printf_verbose("Writer delay : %lu loops.\n", wdelay);
	printf_verbose("Reader duration : %lu loops.\n", rduration);
	printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
			"main", (unsigned long)pthread_self(),
			(unsigned long)gettid());

	test_array = calloc(1, sizeof(*test_array) * ARRAY_SIZE);
	tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
	tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
	count_reader = malloc(sizeof(*count_reader) * nr_readers);
	count_writer = malloc(sizeof(*count_writer) * nr_writers);

	next_aff = 0;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_create(&tid_reader[i], NULL, thr_reader,
				     &count_reader[i]);
		if (err != 0)
			exit(1);
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_create(&tid_writer[i], NULL, thr_writer,
				     &count_writer[i]);
		if (err != 0)
			exit(1);
	}

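	/*
	 * All threads spin on test_go; the full barrier orders the setup
	 * above before the start flag is raised.
	 */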
	cmm_smp_mb();

	test_go = 1;

	sleep(duration);

	test_stop = 1;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_join(tid_reader[i], &tret);
		if (err != 0)
			exit(1);
		tot_reads += count_reader[i];
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_join(tid_writer[i], &tret);
		if (err != 0)
			exit(1);
		tot_writes += count_writer[i];
	}

	printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
	       tot_writes);
	printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu wdur %6lu "
		"nr_writers %3u "
		"wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu\n",
		argv[0], duration, nr_readers, rduration, wduration,
		nr_writers, wdelay, tot_reads, tot_writes,
		tot_reads + tot_writes);
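	/* Poison the last published element (if any) and release all arrays. */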
	test_array_free(test_rcu_pointer);
	free(test_array);
	free(tid_reader);
	free(tid_writer);
	free(count_reader);
	free(count_writer);
	return 0;
}