test api cleanup: remove unused primitives
[urcu.git] / tests / api.h

#ifndef _INCLUDE_API_H
#define _INCLUDE_API_H

#include "../config.h"

/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel. For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */

#include <urcu/arch.h>

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)

/*
 * Machine parameters.
 */

#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))

/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version. However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/time.h>	/* gettimeofday(), needed by get_microseconds() below */
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */

/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER

static void spin_lock_init(spinlock_t *sp)
{
	if (pthread_mutex_init(sp, NULL) != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}

static void spin_lock(spinlock_t *sp)
{
	if (pthread_mutex_lock(sp) != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}

static void spin_unlock(spinlock_t *sp)
{
	if (pthread_mutex_unlock(sp) != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}

#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
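
/*
 * Usage sketch (illustrative only, not part of the original API): the
 * wrappers above map kernel-style spinlock calls onto a pthread mutex,
 * and the irqsave/irqrestore variants merely set/clear the flags word
 * around the same lock operations. The names "count_lock", "count" and
 * "inc_count" below are hypothetical.
 */
#if 0
DEFINE_SPINLOCK(count_lock)	/* the macro already supplies the trailing ';' */
unsigned long count;

static void inc_count(void)
{
	unsigned long flags;

	spin_lock_irqsave(&count_lock, flags);
	count++;
	spin_unlock_irqrestore(&count_lock, flags);
}
#endif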

/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY ((thread_id_t) 0)
#define __THREAD_ID_MAP_WAITING ((thread_id_t) 1)
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;

#define for_each_thread(t) \
	for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
		    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

#define for_each_tid(t, tid) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
		    ((tid) != __THREAD_ID_MAP_WAITING))

pthread_key_t thread_id_key;

static int __smp_thread_id(void)
{
	int i;
	thread_id_t tid = pthread_self();

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			long v = i + 1;	/* must be non-NULL. */

			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
				perror("pthread_setspecific");
				exit(-1);
			}
			return i;
		}
	}
	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			spin_unlock(&__thread_id_map_mutex);
			return i;
		}
	}
	spin_unlock(&__thread_id_map_mutex);
	fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
		(int)tid, (int)tid);
	exit(-1);
}

static int smp_thread_id(void)
{
	void *id;

	id = pthread_getspecific(thread_id_key);
	if (id == NULL)
		return __smp_thread_id();
	return (long)id - 1;	/* stored as i + 1; avoid arithmetic on void * */
}

static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
	thread_id_t tid;
	int i;

	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
			break;
	}
	if (i >= NR_THREADS) {
		spin_unlock(&__thread_id_map_mutex);
		fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_WAITING;
	spin_unlock(&__thread_id_map_mutex);
	if (pthread_create(&tid, NULL, func, arg) != 0) {
		perror("create_thread:pthread_create");
		exit(-1);
	}
	__thread_id_map[i] = tid;
	return tid;
}

static void *wait_thread(thread_id_t tid)
{
	int i;
	void *vp;

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid)
			break;
	}
	if (i >= NR_THREADS) {
		fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
			(int)tid, (int)tid);
		exit(-1);
	}
	if (pthread_join(tid, &vp) != 0) {
		perror("wait_thread:pthread_join");
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	return vp;
}

static void wait_all_threads(void)
{
	int i;
	thread_id_t tid;

	for (i = 1; i < NR_THREADS; i++) {
		tid = __thread_id_map[i];
		if (tid != __THREAD_ID_MAP_EMPTY &&
		    tid != __THREAD_ID_MAP_WAITING)
			(void)wait_thread(tid);
	}
}
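
/*
 * Usage sketch (illustrative only, not part of the original API): a test
 * typically creates workers with create_thread(), each worker can find its
 * own slot with smp_thread_id(), and the parent reaps everything with
 * wait_all_threads(). The "worker" function and "run_workers" helper below
 * are hypothetical.
 */
#if 0
static void *worker(void *arg)
{
	printf("worker %d running\n", smp_thread_id());
	return NULL;
}

static void run_workers(int nthreads)
{
	int i;

	for (i = 0; i < nthreads; i++)
		create_thread(worker, NULL);
	wait_all_threads();
}
#endif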

#ifndef HAVE_CPU_SET_T
typedef unsigned long cpu_set_t;
# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while (0)
# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while (0)
#endif

static void run_on(int cpu)
{
#if HAVE_SCHED_SETAFFINITY
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif /* HAVE_SCHED_SETAFFINITY */
}

/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}
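
/*
 * Usage sketch (illustrative only, not part of the original API): measuring
 * the wall-clock duration of a code section, in microseconds. As the comment
 * above notes, gettimeofday() is not monotonic, so this is only suitable for
 * rough measurements. "time_workload" and "workload" are hypothetical names.
 */
#if 0
static void time_workload(void (*workload)(void))
{
	long long t_start, t_end;

	t_start = get_microseconds();
	workload();
	t_end = get_microseconds();
	printf("elapsed: %lld us\n", t_end - t_start);
}
#endif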

/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)

DEFINE_PER_THREAD(int, smp_processor_id);
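
/*
 * Usage sketch (illustrative only, not part of the original API): a
 * cache-line-padded per-thread counter, bumped from each worker and summed
 * by the parent. "n_ops", "count_one_op" and "sum_ops" are hypothetical
 * names.
 */
#if 0
DEFINE_PER_THREAD(long long, n_ops);

static void count_one_op(void)
{
	__get_thread_var(n_ops)++;	/* this thread's slot, via smp_thread_id() */
}

static long long sum_ops(void)
{
	long long sum = 0;
	int t;

	for_each_thread(t)
		sum += per_thread(n_ops, t);
	return sum;
}
#endif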

/*
 * Bug checks.
 */

/*
 * Note: unlike the kernel's BUG_ON(), this aborts when the condition is
 * *false*, i.e. it behaves like an always-enabled assert().
 */
#define BUG_ON(c) do { if (!(c)) abort(); } while (0)

/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
	int i;

	spin_lock_init(&__thread_id_map_mutex);
	__thread_id_map[0] = pthread_self();
	for (i = 1; i < NR_THREADS; i++)
		__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	init_per_thread(smp_processor_id, 0);
	if (pthread_key_create(&thread_id_key, NULL) != 0) {
		perror("pthread_key_create");
		exit(-1);
	}
}
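
/*
 * Usage sketch (illustrative only, not part of the original API): smp_init()
 * must run before any of the primitives above, typically first thing in the
 * test's main(). "worker" refers to the hypothetical function sketched
 * earlier.
 */
#if 0
int main(int argc, char *argv[])
{
	smp_init();			/* must come first */
	create_thread(worker, NULL);
	wait_all_threads();
	return 0;
}
#endif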

#endif