wfcqueue: introduce nonblocking API
[urcu.git] / tests / api.h
#ifndef _INCLUDE_API_H
#define _INCLUDE_API_H

#define _GNU_SOURCE
#include "../config.h"

/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel. For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */

#include <urcu/compiler.h>
#include <urcu/arch.h>

/*
 * Machine parameters.
 */

#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))

/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version. However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/time.h>	/* gettimeofday(), used by get_microseconds() below */
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */

/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER

static void spin_lock_init(spinlock_t *sp)
{
	if (pthread_mutex_init(sp, NULL) != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}

static void spin_lock(spinlock_t *sp)
{
	if (pthread_mutex_lock(sp) != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}

static void spin_unlock(spinlock_t *sp)
{
	if (pthread_mutex_unlock(sp) != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}

#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)

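/*
 * Illustrative sketch (not part of the original header): protecting a shared
 * counter with the wrappers above. The names `count_lock', `count' and
 * `count_inc' are hypothetical; spin_lock_init(&count_lock) must run once
 * before first use.
 *
 *	spinlock_t count_lock;
 *	unsigned long count;
 *
 *	void count_inc(void)
 *	{
 *		spin_lock(&count_lock);
 *		count++;
 *		spin_unlock(&count_lock);
 *	}
 */
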
/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY ((thread_id_t) 0)
#define __THREAD_ID_MAP_WAITING ((thread_id_t) 1)
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;

#define for_each_thread(t) \
	for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
		    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

#define for_each_tid(t, tid) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
		    ((tid) != __THREAD_ID_MAP_WAITING))

pthread_key_t thread_id_key;

static int __smp_thread_id(void)
{
	int i;
	thread_id_t tid = pthread_self();

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			long v = i + 1;	/* must be non-NULL. */

			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
				perror("pthread_setspecific");
				exit(-1);
			}
			return i;
		}
	}
	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			spin_unlock(&__thread_id_map_mutex);
			return i;
		}
	}
	spin_unlock(&__thread_id_map_mutex);
	fprintf(stderr, "smp_thread_id: Rogue thread, id: %lu(%#lx)\n",
		(unsigned long) tid, (unsigned long) tid);
	exit(-1);
}

static int smp_thread_id(void)
{
	void *id;

	id = pthread_getspecific(thread_id_key);
	if (id == NULL)
		return __smp_thread_id();
	return (long)(id - 1);
}

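/*
 * Illustrative sketch (not part of the original header): smp_thread_id()
 * returns a small index in [0, NR_THREADS) for threads created through
 * create_thread(), so it can index per-thread arrays directly. The
 * `op_count' array and `note_op' are hypothetical.
 *
 *	unsigned long op_count[NR_THREADS];
 *
 *	void note_op(void)
 *	{
 *		op_count[smp_thread_id()]++;
 *	}
 */
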
static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
	thread_id_t tid;
	int i;

	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
			break;
	}
	if (i >= NR_THREADS) {
		spin_unlock(&__thread_id_map_mutex);
		fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_WAITING;
	spin_unlock(&__thread_id_map_mutex);
	if (pthread_create(&tid, NULL, func, arg) != 0) {
		perror("create_thread:pthread_create");
		exit(-1);
	}
	__thread_id_map[i] = tid;
	return tid;
}

static void *wait_thread(thread_id_t tid)
{
	int i;
	void *vp;

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid)
			break;
	}
	if (i >= NR_THREADS) {
		fprintf(stderr, "wait_thread: bad tid = %lu(%#lx)\n",
			(unsigned long)tid, (unsigned long)tid);
		exit(-1);
	}
	if (pthread_join(tid, &vp) != 0) {
		perror("wait_thread:pthread_join");
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	return vp;
}

static void wait_all_threads(void)
{
	int i;
	thread_id_t tid;

	for (i = 1; i < NR_THREADS; i++) {
		tid = __thread_id_map[i];
		if (tid != __THREAD_ID_MAP_EMPTY &&
		    tid != __THREAD_ID_MAP_WAITING)
			(void)wait_thread(tid);
	}
}

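/*
 * Illustrative sketch (not part of the original header): spawning worker
 * threads and reaping them. Assumes smp_init() (see below) has already been
 * called; `worker', `do_per_thread_work' and `nworkers' are hypothetical.
 *
 *	void *worker(void *arg)
 *	{
 *		do_per_thread_work();
 *		return NULL;
 *	}
 *
 *	void run_workers(int nworkers)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nworkers; i++)
 *			create_thread(worker, NULL);
 *		wait_all_threads();
 *	}
 */
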
#ifndef HAVE_CPU_SET_T
typedef unsigned long cpu_set_t;
# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
#endif

static void run_on(int cpu)
{
#if HAVE_SCHED_SETAFFINITY
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif /* HAVE_SCHED_SETAFFINITY */
}

/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}

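/*
 * Illustrative sketch (not part of the original header): timing a section of
 * code with get_microseconds(). `do_work' is a hypothetical function.
 *
 *	long long t_start, t_elapsed;
 *
 *	t_start = get_microseconds();
 *	do_work();
 *	t_elapsed = get_microseconds() - t_start;
 *	printf("elapsed: %lld us\n", t_elapsed);
 */
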
/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)

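/*
 * Illustrative sketch (not part of the original header): a per-thread counter
 * built from the macros above. Each worker bumps its own cache-line-aligned
 * slot without locking; the parent sums the slots after wait_all_threads().
 * `n_ops' is a hypothetical name.
 *
 *	DEFINE_PER_THREAD(long, n_ops);
 *
 *	init_per_thread(n_ops, 0);
 *
 *	__get_thread_var(n_ops)++;	(in each worker thread)
 *
 *	long sum = 0;
 *	int t;
 *	for_each_thread(t)
 *		sum += per_thread(n_ops, t);
 */
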
DEFINE_PER_THREAD(int, smp_processor_id);

/*
 * Bug checks.
 */

/* Abort if the condition holds, mirroring the kernel's BUG_ON(). */
#define BUG_ON(c) do { if (c) abort(); } while (0)

/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
	int i;

	spin_lock_init(&__thread_id_map_mutex);
	__thread_id_map[0] = pthread_self();
	for (i = 1; i < NR_THREADS; i++)
		__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	init_per_thread(smp_processor_id, 0);
	if (pthread_key_create(&thread_id_key, NULL) != 0) {
		perror("pthread_key_create");
		exit(-1);
	}
}

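/*
 * Illustrative sketch (not part of the original header): the expected call
 * order in a test program's main(). smp_init() must run before any other
 * primitive in this file; `worker' is hypothetical.
 *
 *	int main(int argc, char *argv[])
 *	{
 *		smp_init();
 *		create_thread(worker, NULL);
 *		wait_all_threads();
 *		return 0;
 *	}
 */
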
#endif