Remove glibc < 2.4 compat code for sched_setaffinity
[urcu.git] / tests / benchmark / test_urcu_qsbr_gc.c
CommitLineData
de10a585
MD
1/*
2 * test_urcu_gc.c
3 *
 4 * Userspace RCU library - test program (with batch reclamation)
5 *
6 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
de10a585
MD
23#include <stdio.h>
24#include <pthread.h>
25#include <stdlib.h>
26#include <string.h>
27#include <sys/types.h>
28#include <sys/wait.h>
29#include <unistd.h>
30#include <stdio.h>
31#include <assert.h>
de10a585
MD
32#include <errno.h>
33
34#include <urcu/arch.h>
bd252a04 35#include <urcu/tls-compat.h>
94df6318 36#include "thread-id.h"
2650042a 37#include "../common/debug-yield.h"
de10a585
MD
38
39/* hardcoded number of CPUs */
40#define NR_CPUS 16384
41
de10a585
MD
42#define _LGPL_SOURCE
43#include <urcu-qsbr.h>
44
45struct test_array {
46 int a;
47};
48
49static volatile int test_go, test_stop;
50
51static unsigned long wdelay;
52
53static struct test_array *test_rcu_pointer;
54
55static unsigned long duration;
56
57/* read-side C.S. duration, in loops */
58static unsigned long rduration;
d266df35 59static long reclaim_batch = 1;
de10a585
MD
60
61struct reclaim_queue {
62 void **queue; /* Beginning of queue */
63 void **head; /* Insert position */
64};
65
66static struct reclaim_queue *pending_reclaims;
67
68
69/* write-side C.S. duration, in loops */
70static unsigned long wduration;
71
/*
 * Busy-wait for the requested number of iterations, hinting the CPU
 * between iterations that we are spinning.
 */
static inline void loop_sleep(unsigned long loops)
{
	unsigned long i;

	for (i = 0; i < loops; i++)
		caa_cpu_relax();
}
77
78static int verbose_mode;
79
80#define printf_verbose(fmt, args...) \
81 do { \
82 if (verbose_mode) \
83 printf(fmt, args); \
84 } while (0)
85
86static unsigned int cpu_affinities[NR_CPUS];
87static unsigned int next_aff = 0;
88static int use_affinity = 0;
89
90pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
91
de10a585
MD
92static void set_affinity(void)
93{
95bc7fb9 94#if HAVE_SCHED_SETAFFINITY
de10a585 95 cpu_set_t mask;
95bc7fb9
MD
96 int cpu, ret;
97#endif /* HAVE_SCHED_SETAFFINITY */
de10a585
MD
98
99 if (!use_affinity)
100 return;
101
102#if HAVE_SCHED_SETAFFINITY
103 ret = pthread_mutex_lock(&affinity_mutex);
104 if (ret) {
105 perror("Error in pthread mutex lock");
106 exit(-1);
107 }
108 cpu = cpu_affinities[next_aff++];
109 ret = pthread_mutex_unlock(&affinity_mutex);
110 if (ret) {
111 perror("Error in pthread mutex unlock");
112 exit(-1);
113 }
114
115 CPU_ZERO(&mask);
116 CPU_SET(cpu, &mask);
de10a585 117 sched_setaffinity(0, sizeof(mask), &mask);
de10a585
MD
118#endif /* HAVE_SCHED_SETAFFINITY */
119}
120
121/*
122 * returns 0 if test should end.
123 */
124static int test_duration_write(void)
125{
126 return !test_stop;
127}
128
129static int test_duration_read(void)
130{
131 return !test_stop;
132}
133
bd252a04
MD
134static DEFINE_URCU_TLS(unsigned long long, nr_writes);
135static DEFINE_URCU_TLS(unsigned long long, nr_reads);
de10a585
MD
136
137static unsigned int nr_readers;
138static unsigned int nr_writers;
139
140pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
141static
142unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
143
144
145void rcu_copy_mutex_lock(void)
146{
147 int ret;
148 ret = pthread_mutex_lock(&rcu_copy_mutex);
149 if (ret) {
150 perror("Error in pthread mutex lock");
151 exit(-1);
152 }
153}
154
155void rcu_copy_mutex_unlock(void)
156{
157 int ret;
158
159 ret = pthread_mutex_unlock(&rcu_copy_mutex);
160 if (ret) {
161 perror("Error in pthread mutex unlock");
162 exit(-1);
163 }
164}
165
/*
 * Reader thread body.
 *
 * Repeatedly dereferences the shared RCU pointer inside a read-side
 * critical section and checks the payload invariant (a == 8).  Because
 * this is the QSBR flavor, the thread must announce quiescent states
 * itself; it does so every 1024 reads.
 *
 * _count: out-parameter (unsigned long long *) receiving this thread's
 *         total number of reads at exit.
 * Returns (void *)1 (unused marker value).
 */
void *thr_reader(void *_count)
{
	unsigned long long *count = _count;
	struct test_array *local_ptr;

	printf_verbose("thread_begin %s, tid %lu\n",
			"reader", urcu_get_thread_id());

	set_affinity();

	rcu_register_thread();

	/* Busy-wait for the main thread to open the starting gate. */
	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		_rcu_read_lock();
		local_ptr = _rcu_dereference(test_rcu_pointer);
		rcu_debug_yield_read();
		/* Writers only ever publish elements with a == 8. */
		if (local_ptr)
			assert(local_ptr->a == 8);
		/* Optional artificial read-side C.S. duration (-c). */
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
		_rcu_read_unlock();
		URCU_TLS(nr_reads)++;
		/* QS each 1024 reads */
		if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
			_rcu_quiescent_state();
		if (caa_unlikely(!test_duration_read()))
			break;
	}

	rcu_unregister_thread();

	*count = URCU_TLS(nr_reads);
	printf_verbose("thread_end %s, tid %lu\n",
			"reader", urcu_get_thread_id());
	return ((void*)1);

}
208
209static void rcu_gc_clear_queue(unsigned long wtidx)
210{
211 void **p;
212
213 /* Wait for Q.S and empty queue */
214 synchronize_rcu();
215
216 for (p = pending_reclaims[wtidx].queue;
217 p < pending_reclaims[wtidx].head; p++) {
218 /* poison */
219 if (*p)
220 ((struct test_array *)*p)->a = 0;
221 free(*p);
222 }
223 pending_reclaims[wtidx].head = pending_reclaims[wtidx].queue;
224}
225
226/* Using per-thread queue */
227static void rcu_gc_reclaim(unsigned long wtidx, void *old)
228{
229 /* Queue pointer */
230 *pending_reclaims[wtidx].head = old;
231 pending_reclaims[wtidx].head++;
232
a0b7f7ea 233 if (caa_likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
de10a585
MD
234 < reclaim_batch))
235 return;
236
237 rcu_gc_clear_queue(wtidx);
238}
239
240void *thr_writer(void *data)
241{
242 unsigned long wtidx = (unsigned long)data;
243#ifdef TEST_LOCAL_GC
244 struct test_array *old = NULL;
245#else
246 struct test_array *new, *old;
247#endif
248
94df6318
MD
249 printf_verbose("thread_begin %s, tid %lu\n",
250 "writer", urcu_get_thread_id());
de10a585
MD
251
252 set_affinity();
253
254 while (!test_go)
255 {
256 }
257 cmm_smp_mb();
258
259 for (;;) {
260#ifndef TEST_LOCAL_GC
261 new = malloc(sizeof(*new));
262 new->a = 8;
263 old = _rcu_xchg_pointer(&test_rcu_pointer, new);
264#endif
a0b7f7ea 265 if (caa_unlikely(wduration))
de10a585
MD
266 loop_sleep(wduration);
267 rcu_gc_reclaim(wtidx, old);
bd252a04 268 URCU_TLS(nr_writes)++;
a0b7f7ea 269 if (caa_unlikely(!test_duration_write()))
de10a585 270 break;
a0b7f7ea 271 if (caa_unlikely(wdelay))
de10a585
MD
272 loop_sleep(wdelay);
273 }
274
94df6318
MD
275 printf_verbose("thread_end %s, tid %lu\n",
276 "writer", urcu_get_thread_id());
bd252a04 277 tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
de10a585
MD
278 return ((void*)2);
279}
280
/* Print command-line usage for this benchmark to stdout. */
void show_usage(int argc, char **argv)
{
	static const char *const options[] = {
		" [-r] [-w] (yield reader and/or writer)\n",
		" [-b batch] (batch reclaim)\n",
		" [-d delay] (writer period (us))\n",
		" [-c duration] (reader C.S. duration (in loops))\n",
		" [-e duration] (writer C.S. duration (in loops))\n",
		" [-v] (verbose output)\n",
		" [-a cpu#] [-a cpu#]... (affinity)\n",
	};
	size_t opt;

	printf("Usage : %s nr_readers nr_writers duration (s) <OPTIONS>\n",
		argv[0]);
	printf("OPTIONS:\n");
	for (opt = 0; opt < sizeof(options) / sizeof(options[0]); opt++)
		printf("%s", options[opt]);
	printf("\n");
}
295
/*
 * Benchmark driver.
 *
 * Parses "nr_readers nr_writers duration" plus the option flags, spawns
 * the reader and writer threads, lets them run for `duration` seconds,
 * then stops them, joins them, flushes each writer's remaining reclaim
 * queue and prints the SUMMARY line.  Returns 0 on success, -1 on bad
 * arguments.
 */
int main(int argc, char **argv)
{
	int err;
	pthread_t *tid_reader, *tid_writer;
	void *tret;
	unsigned long long *count_reader;
	unsigned long long tot_reads = 0, tot_writes = 0;
	int i, a;
	unsigned int i_thr;

	/* Three positional arguments are mandatory. */
	if (argc < 4) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[1], "%u", &nr_readers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[2], "%u", &nr_writers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[3], "%lu", &duration);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	/* Optional flags; each value-taking option consumes the next argv. */
	for (i = 4; i < argc; i++) {
		if (argv[i][0] != '-')
			continue;
		switch (argv[i][1]) {
		case 'r':
			rcu_debug_yield_enable(RCU_YIELD_READ);
			break;
		case 'w':
			rcu_debug_yield_enable(RCU_YIELD_WRITE);
			break;
		case 'a':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			a = atoi(argv[++i]);
			cpu_affinities[next_aff++] = a;
			use_affinity = 1;
			printf_verbose("Adding CPU %d affinity\n", a);
			break;
		case 'b':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			reclaim_batch = atol(argv[++i]);
			break;
		case 'c':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			rduration = atol(argv[++i]);
			break;
		case 'd':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wdelay = atol(argv[++i]);
			break;
		case 'e':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wduration = atol(argv[++i]);
			break;
		case 'v':
			verbose_mode = 1;
			break;
		}
	}

	printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
		duration, nr_readers, nr_writers);
	printf_verbose("Writer delay : %lu loops.\n", wdelay);
	printf_verbose("Reader duration : %lu loops.\n", rduration);
	printf_verbose("thread %-6s, tid %lu\n",
			"main", urcu_get_thread_id());

	/* Per-thread arrays; NOTE(review): calloc results are not checked. */
	tid_reader = calloc(nr_readers, sizeof(*tid_reader));
	tid_writer = calloc(nr_writers, sizeof(*tid_writer));
	count_reader = calloc(nr_readers, sizeof(*count_reader));
	tot_nr_writes = calloc(nr_writers, sizeof(*tot_nr_writes));
	pending_reclaims = calloc(nr_writers, sizeof(*pending_reclaims));
	/*
	 * Size each per-writer reclaim queue to at least one cache line to
	 * avoid false sharing between writers with small batch sizes.
	 */
	if (reclaim_batch * sizeof(*pending_reclaims[0].queue)
			< CAA_CACHE_LINE_SIZE)
		for (i_thr = 0; i_thr < nr_writers; i_thr++)
			pending_reclaims[i_thr].queue = calloc(1, CAA_CACHE_LINE_SIZE);
	else
		for (i_thr = 0; i_thr < nr_writers; i_thr++)
			pending_reclaims[i_thr].queue = calloc(reclaim_batch,
					sizeof(*pending_reclaims[i_thr].queue));
	for (i_thr = 0; i_thr < nr_writers; i_thr++)
		pending_reclaims[i_thr].head = pending_reclaims[i_thr].queue;

	next_aff = 0;

	for (i_thr = 0; i_thr < nr_readers; i_thr++) {
		err = pthread_create(&tid_reader[i_thr], NULL, thr_reader,
				     &count_reader[i_thr]);
		if (err != 0)
			exit(1);
	}
	for (i_thr = 0; i_thr < nr_writers; i_thr++) {
		err = pthread_create(&tid_writer[i_thr], NULL, thr_writer,
				     (void *)(long)i_thr);
		if (err != 0)
			exit(1);
	}

	/* Order thread setup before opening the starting gate. */
	cmm_smp_mb();

	test_go = 1;

	sleep(duration);

	test_stop = 1;

	for (i_thr = 0; i_thr < nr_readers; i_thr++) {
		err = pthread_join(tid_reader[i_thr], &tret);
		if (err != 0)
			exit(1);
		tot_reads += count_reader[i_thr];
	}
	for (i_thr = 0; i_thr < nr_writers; i_thr++) {
		err = pthread_join(tid_writer[i_thr], &tret);
		if (err != 0)
			exit(1);
		tot_writes += tot_nr_writes[i_thr];
		/* Drain any elements still queued for reclamation. */
		rcu_gc_clear_queue(i_thr);
	}

	printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
	       tot_writes);
	/* Machine-parseable result line consumed by the test harness. */
	printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu wdur %6lu "
		"nr_writers %3u "
		"wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu "
		"batch %ld\n",
		argv[0], duration, nr_readers, rduration, wduration,
		nr_writers, wdelay, tot_reads, tot_writes,
		tot_reads + tot_writes, reclaim_batch);

	free(tid_reader);
	free(tid_writer);
	free(count_reader);
	free(tot_nr_writes);

	for (i_thr = 0; i_thr < nr_writers; i_thr++)
		free(pending_reclaims[i_thr].queue);
	free(pending_reclaims);

	return 0;
}
This page took 0.055769 seconds and 4 git commands to generate.