fix runtests.sh
[urcu.git] / test_perthreadlock.c
CommitLineData
0ee8bb0a
MD
1/*
2 * test_urcu.c
3 *
4 * Userspace RCU library - test program
5 *
6 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
9e97e478 23#define _GNU_SOURCE
0ee8bb0a
MD
24#include <stdio.h>
25#include <pthread.h>
26#include <stdlib.h>
27#include <string.h>
28#include <sys/types.h>
29#include <sys/wait.h>
30#include <unistd.h>
31#include <stdio.h>
32#include <assert.h>
33#include <sys/syscall.h>
9e97e478 34#include <sched.h>
0ee8bb0a
MD
35
36#include "arch.h"
37
/*
 * gettid() compatibility shims: glibc historically exported no wrapper
 * for the gettid syscall, so pick the best available mechanism.
 */
#if defined(_syscall0)
/* Old-style syscall stub generator (ancient kernel headers). */
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
/* Direct syscall via syscall(2) when the syscall number is known. */
static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}
#else
/* Last resort: fall back to the process id (wrong for threads, but usable). */
#warning "use pid as tid"
static inline pid_t gettid(void)
{
	return getpid();
}
#endif
52
/*
 * Statically-linked builds use the LGPL fast paths; dynamically-linked
 * test builds stub out debug_yield_read() instead.
 */
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
#define debug_yield_read()
#endif
#include "urcu.h"
59
/* Dummy payload protected by the per-thread locks. */
struct test_array {
	int a;
};

/*
 * One mutex per reader thread, aligned to 128 bytes so each lock sits
 * on its own cache line (avoids false sharing between readers).
 */
struct per_thread_lock {
	pthread_mutex_t lock;
} __attribute__((aligned(128))); /* cache-line aligned */

/* Array of nr_readers locks, allocated in main(). */
static struct per_thread_lock *per_thread_lock;

/* Start/stop flags flipped by main(); worker threads poll these. */
static volatile int test_go, test_stop;

/* Writer sleep between iterations, in microseconds (0 = no delay). */
static int wdelay;

/* Shared data; readers assert a == 8 while holding their lock. */
static volatile struct test_array test_array = { 8 };

/* Test duration in seconds (parsed from argv). */
static unsigned long duration;
78/*
79 * returns 0 if test should end.
80 */
81static int test_duration_write(void)
82{
78efb485 83 return !test_stop;
0ee8bb0a
MD
84}
85
86static int test_duration_read(void)
87{
78efb485 88 return !test_stop;
0ee8bb0a
MD
89}
90
/* Per-thread iteration counters (thread-local storage). */
static unsigned long long __thread nr_writes;
static unsigned long long __thread nr_reads;

/*
 * Final per-thread counts collected by main(); one slot per thread,
 * cache-line aligned to avoid false sharing on the final stores.
 */
static unsigned long long __attribute__((aligned(128))) *tot_nr_writes;
static unsigned long long __attribute__((aligned(128))) *tot_nr_reads;

/* Thread counts parsed from argv. */
static unsigned int nr_readers;
static unsigned int nr_writers;

pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
101
102void rcu_copy_mutex_lock(void)
103{
104 int ret;
105 ret = pthread_mutex_lock(&rcu_copy_mutex);
106 if (ret) {
107 perror("Error in pthread mutex lock");
108 exit(-1);
109 }
110}
111
112void rcu_copy_mutex_unlock(void)
113{
114 int ret;
115
116 ret = pthread_mutex_unlock(&rcu_copy_mutex);
117 if (ret) {
118 perror("Error in pthread mutex unlock");
119 exit(-1);
120 }
121}
122
123void *thr_reader(void *data)
124{
125 unsigned long tidx = (unsigned long)data;
126
127 printf("thread_begin %s, thread id : %lx, tid %lu\n",
128 "reader", pthread_self(), (unsigned long)gettid());
129
130 while (!test_go)
131 {
132 }
133
134 for (;;) {
135 pthread_mutex_lock(&per_thread_lock[tidx].lock);
136 assert(test_array.a == 8);
137 pthread_mutex_unlock(&per_thread_lock[tidx].lock);
138 nr_reads++;
59d5a406 139 if (unlikely(!test_duration_read()))
0ee8bb0a
MD
140 break;
141 }
142
143 tot_nr_reads[tidx] = nr_reads;
144 printf("thread_end %s, thread id : %lx, tid %lu\n",
145 "reader", pthread_self(), (unsigned long)gettid());
146 return ((void*)1);
147
148}
149
150void *thr_writer(void *data)
151{
152 unsigned long wtidx = (unsigned long)data;
153 long tidx;
154
155 printf("thread_begin %s, thread id : %lx, tid %lu\n",
156 "writer", pthread_self(), (unsigned long)gettid());
157
158 while (!test_go)
159 {
160 }
161 smp_mb();
162
163 for (;;) {
164 for (tidx = 0; tidx < nr_readers; tidx++) {
165 pthread_mutex_lock(&per_thread_lock[tidx].lock);
166 }
2b4e4125 167 test_array.a = 0;
0ee8bb0a
MD
168 test_array.a = 8;
169 for (tidx = nr_readers - 1; tidx >= 0; tidx--) {
170 pthread_mutex_unlock(&per_thread_lock[tidx].lock);
171 }
172 nr_writes++;
59d5a406 173 if (unlikely(!test_duration_write()))
0ee8bb0a 174 break;
59d5a406 175 if (unlikely(wdelay))
0ee8bb0a
MD
176 usleep(wdelay);
177 }
178
179 printf("thread_end %s, thread id : %lx, tid %lu\n",
180 "writer", pthread_self(), (unsigned long)gettid());
181 tot_nr_writes[wtidx] = nr_writes;
182 return ((void*)2);
183}
184
/* Print command-line usage for the test program to stdout. */
void show_usage(int argc, char **argv)
{
	printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
#ifdef DEBUG_YIELD
	printf(" [-r] [-w] (yield reader and/or writer)");
#endif
	/* Adjacent literals concatenate; output is unchanged. */
	printf(" [-d delay] (writer period (us))"
	       " [-a cpu#] [-a cpu#]... (affinity)"
	       "\n");
}
195
/* CPU affinity mask built from -a options; applied to the process in main(). */
cpu_set_t affinity;
198int main(int argc, char **argv)
199{
200 int err;
201 pthread_t *tid_reader, *tid_writer;
202 void *tret;
203 unsigned long long *count_reader, *count_writer;
204 unsigned long long tot_reads = 0, tot_writes = 0;
9e97e478
MD
205 int i, a;
206 int use_affinity = 0;
0ee8bb0a
MD
207
208 if (argc < 4) {
209 show_usage(argc, argv);
210 return -1;
211 }
212 smp_mb();
213
214 err = sscanf(argv[1], "%u", &nr_readers);
215 if (err != 1) {
216 show_usage(argc, argv);
217 return -1;
218 }
219
220 err = sscanf(argv[2], "%u", &nr_writers);
221 if (err != 1) {
222 show_usage(argc, argv);
223 return -1;
224 }
225
226 err = sscanf(argv[3], "%lu", &duration);
227 if (err != 1) {
228 show_usage(argc, argv);
229 return -1;
230 }
231
9e97e478
MD
232 CPU_ZERO(&affinity);
233
0ee8bb0a
MD
234 for (i = 4; i < argc; i++) {
235 if (argv[i][0] != '-')
236 continue;
237 switch (argv[i][1]) {
238#ifdef DEBUG_YIELD
239 case 'r':
240 yield_active |= YIELD_READ;
241 break;
242 case 'w':
243 yield_active |= YIELD_WRITE;
244 break;
245#endif
9e97e478
MD
246 case 'a':
247 if (argc < i + 2) {
248 show_usage(argc, argv);
249 return -1;
250 }
251 a = atoi(argv[++i]);
252 CPU_SET(a, &affinity);
253 use_affinity = 1;
254 printf("Adding CPU %d affinity\n", a);
255 break;
0ee8bb0a
MD
256 case 'd':
257 if (argc < i + 2) {
258 show_usage(argc, argv);
259 return -1;
260 }
261 wdelay = atoi(argv[++i]);
262 break;
263 }
264 }
265
266 printf("running test for %lu seconds, %u readers, %u writers.\n",
267 duration, nr_readers, nr_writers);
268 printf("Writer delay : %u us.\n", wdelay);
269 printf("thread %-6s, thread id : %lx, tid %lu\n",
270 "main", pthread_self(), (unsigned long)gettid());
271
9e97e478
MD
272 if (use_affinity
273 && sched_setaffinity(0, sizeof(affinity), &affinity) < 0) {
274 perror("sched_setaffinity");
275 exit(-1);
276 }
277
0ee8bb0a
MD
278 tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
279 tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
280 count_reader = malloc(sizeof(*count_reader) * nr_readers);
281 count_writer = malloc(sizeof(*count_writer) * nr_writers);
282 tot_nr_reads = malloc(sizeof(*tot_nr_reads) * nr_readers);
283 tot_nr_writes = malloc(sizeof(*tot_nr_writes) * nr_writers);
284 per_thread_lock = malloc(sizeof(*per_thread_lock) * nr_readers);
285
286 for (i = 0; i < nr_readers; i++) {
287 err = pthread_create(&tid_reader[i], NULL, thr_reader,
288 (void *)(long)i);
289 if (err != 0)
290 exit(1);
291 }
292 for (i = 0; i < nr_writers; i++) {
293 err = pthread_create(&tid_writer[i], NULL, thr_writer,
294 (void *)(long)i);
295 if (err != 0)
296 exit(1);
297 }
298
0ee8bb0a
MD
299 smp_mb();
300
78efb485
MD
301 test_go = 1;
302
303 sleep(duration);
304
305 test_stop = 1;
306
0ee8bb0a
MD
307 for (i = 0; i < nr_readers; i++) {
308 err = pthread_join(tid_reader[i], &tret);
309 if (err != 0)
310 exit(1);
311 tot_reads += tot_nr_reads[i];
312 }
313 for (i = 0; i < nr_writers; i++) {
314 err = pthread_join(tid_writer[i], &tret);
315 if (err != 0)
316 exit(1);
317 tot_writes += tot_nr_writes[i];
318 }
319
320 printf("total number of reads : %llu, writes %llu\n", tot_reads,
321 tot_writes);
322 free(tid_reader);
323 free(tid_writer);
324 free(count_reader);
325 free(count_writer);
326 free(tot_nr_reads);
327 free(tot_nr_writes);
328 free(per_thread_lock);
329 return 0;
330}
This page took 0.034085 seconds and 4 git commands to generate.