api: remove list/hlist
[urcu.git] / tests / api.h
CommitLineData
1a43bbd8
MD
1
2#ifndef _INCLUDE_API_H
0578089f
PM
3#define _INCLUDE_API_H
4
d8540fc5
PA
5#include "../config.h"
6
0578089f
PM
7/*
8 * common.h: Common Linux kernel-isms.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; but version 2 of the License only due
13 * to code included from the Linux kernel.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 *
24 * Copyright (c) 2006 Paul E. McKenney, IBM.
25 *
26 * Much code taken from the Linux kernel. For such code, the option
27 * to redistribute under later versions of GPL might not be available.
28 */
29
3c570c9c
PB
30#include <urcu/arch.h>
31
0578089f
PM
#ifndef __always_inline
#define __always_inline inline
#endif

/*
 * Compile-time assertions: evaluate to a negative-size array type when the
 * condition is true, which fails the build.  BUILD_BUG_ON_ZERO() additionally
 * evaluates to 0 so it can be embedded in expressions.
 */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)

#ifdef __ASSEMBLY__
# define stringify_in_c(...) __VA_ARGS__
# define ASM_CONST(x) x
#else
/* This version of stringify will deal with commas... */
# define __stringify_in_c(...) #__VA_ARGS__
# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
# define __ASM_CONST(x) x##UL
# define ASM_CONST(x) __ASM_CONST(x)
#endif
49
50
51/*
52 * arch-i386.h: Expose x86 atomic instructions. 80486 and better only.
53 *
54 * This program is free software; you can redistribute it and/or modify
55 * it under the terms of the GNU General Public License as published by
56 * the Free Software Foundation, but version 2 only due to inclusion
57 * of Linux-kernel code.
58 *
59 * This program is distributed in the hope that it will be useful,
60 * but WITHOUT ANY WARRANTY; without even the implied warranty of
61 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
62 * GNU General Public License for more details.
63 *
64 * You should have received a copy of the GNU General Public License
65 * along with this program; if not, write to the Free Software
66 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
67 *
68 * Copyright (c) 2006 Paul E. McKenney, IBM.
69 *
70 * Much code taken from the Linux kernel. For such code, the option
71 * to redistribute under later versions of GPL might not be available.
72 */
73
/*
 * Machine parameters.
 */

/* CAA_CACHE_LINE_SIZE is provided by <urcu/arch.h>, included above. */
/* #define CAA_CACHE_LINE_SIZE 64 */
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
6ee91d83 81
0578089f
PM
82/*
83 * api_pthreads.h: API mapping to pthreads environment.
84 *
85 * This program is free software; you can redistribute it and/or modify
86 * it under the terms of the GNU General Public License as published by
87 * the Free Software Foundation; either version 2 of the License, or
88 * (at your option) any later version. However, please note that much
89 * of the code in this file derives from the Linux kernel, and that such
90 * code may not be available except under GPLv2.
91 *
92 * This program is distributed in the hope that it will be useful,
93 * but WITHOUT ANY WARRANTY; without even the implied warranty of
94 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
95 * GNU General Public License for more details.
96 *
97 * You should have received a copy of the GNU General Public License
98 * along with this program; if not, write to the Free Software
99 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
100 *
101 * Copyright (c) 2006 Paul E. McKenney, IBM.
102 */
103
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
/* #include "atomic.h" */
114
0578089f
PM
/*
 * Default machine parameters.
 */

#ifndef CAA_CACHE_LINE_SIZE
/* Normally supplied by <urcu/arch.h>; fallback left disabled on purpose. */
/* #define CAA_CACHE_LINE_SIZE 128 */
#endif /* #ifndef CAA_CACHE_LINE_SIZE */
0578089f
PM
122
/*
 * Exclusive locking primitives, mapped onto pthread mutexes.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER

/* Initialize a lock; any failure is fatal to the test program. */
static void spin_lock_init(spinlock_t *sp)
{
	int ret = pthread_mutex_init(sp, NULL);

	if (ret != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}

/* Acquire a lock; any failure is fatal to the test program. */
static void spin_lock(spinlock_t *sp)
{
	int ret = pthread_mutex_lock(sp);

	if (ret != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}

/* Release a lock; any failure is fatal to the test program. */
static void spin_unlock(spinlock_t *sp)
{
	int ret = pthread_mutex_unlock(sp);

	if (ret != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}

/* No interrupts in userspace: the flags argument is just crowbarred. */
#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
158
159/*
160 * Thread creation/destruction primitives.
161 */
162
163typedef pthread_t thread_id_t;
164
165#define NR_THREADS 128
166
3c570c9c
PB
167#define __THREAD_ID_MAP_EMPTY ((thread_id_t) 0)
168#define __THREAD_ID_MAP_WAITING ((thread_id_t) 1)
0578089f
PM
169thread_id_t __thread_id_map[NR_THREADS];
170spinlock_t __thread_id_map_mutex;
171
172#define for_each_thread(t) \
173 for (t = 0; t < NR_THREADS; t++)
174
175#define for_each_running_thread(t) \
176 for (t = 0; t < NR_THREADS; t++) \
177 if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
178 (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))
179
3c570c9c
PB
180#define for_each_tid(t, tid) \
181 for (t = 0; t < NR_THREADS; t++) \
182 if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
183 ((tid) != __THREAD_ID_MAP_WAITING))
184
0578089f
PM
185pthread_key_t thread_id_key;
186
187static int __smp_thread_id(void)
188{
189 int i;
190 thread_id_t tid = pthread_self();
191
192 for (i = 0; i < NR_THREADS; i++) {
193 if (__thread_id_map[i] == tid) {
194 long v = i + 1; /* must be non-NULL. */
195
196 if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
197 perror("pthread_setspecific");
198 exit(-1);
199 }
200 return i;
201 }
202 }
203 spin_lock(&__thread_id_map_mutex);
204 for (i = 0; i < NR_THREADS; i++) {
205 if (__thread_id_map[i] == tid)
206 spin_unlock(&__thread_id_map_mutex);
207 return i;
208 }
209 spin_unlock(&__thread_id_map_mutex);
210 fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
211 (int)tid, (int)tid);
212 exit(-1);
213}
214
215static int smp_thread_id(void)
216{
217 void *id;
218
219 id = pthread_getspecific(thread_id_key);
220 if (id == NULL)
221 return __smp_thread_id();
222 return (long)(id - 1);
223}
224
225static thread_id_t create_thread(void *(*func)(void *), void *arg)
226{
227 thread_id_t tid;
228 int i;
229
230 spin_lock(&__thread_id_map_mutex);
231 for (i = 0; i < NR_THREADS; i++) {
232 if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
233 break;
234 }
235 if (i >= NR_THREADS) {
236 spin_unlock(&__thread_id_map_mutex);
237 fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
238 exit(-1);
239 }
240 __thread_id_map[i] = __THREAD_ID_MAP_WAITING;
241 spin_unlock(&__thread_id_map_mutex);
242 if (pthread_create(&tid, NULL, func, arg) != 0) {
243 perror("create_thread:pthread_create");
244 exit(-1);
245 }
246 __thread_id_map[i] = tid;
247 return tid;
248}
249
250static void *wait_thread(thread_id_t tid)
251{
252 int i;
253 void *vp;
254
255 for (i = 0; i < NR_THREADS; i++) {
256 if (__thread_id_map[i] == tid)
257 break;
258 }
259 if (i >= NR_THREADS){
260 fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
261 (int)tid, (int)tid);
262 exit(-1);
263 }
264 if (pthread_join(tid, &vp) != 0) {
265 perror("wait_thread:pthread_join");
266 exit(-1);
267 }
268 __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
269 return vp;
270}
271
272static void wait_all_threads(void)
273{
274 int i;
275 thread_id_t tid;
276
277 for (i = 1; i < NR_THREADS; i++) {
278 tid = __thread_id_map[i];
279 if (tid != __THREAD_ID_MAP_EMPTY &&
280 tid != __THREAD_ID_MAP_WAITING)
281 (void)wait_thread(tid);
282 }
283}
284
d8540fc5
PA
#ifndef HAVE_CPU_SET_T
/* Minimal cpu_set_t fallback for systems lacking one: a single word,
 * limiting affinity to the first 8 * sizeof(long) CPUs. */
typedef unsigned long cpu_set_t;
# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
#endif
290
0578089f
PM
/*
 * Pin the calling thread to the given CPU, when the platform supports it;
 * compiles to a no-op otherwise.  Errors from sched_setaffinity() are
 * deliberately ignored (best effort).
 */
static void run_on(int cpu)
{
#if HAVE_SCHED_SETAFFINITY
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
/* Old glibc took only two arguments; detected by configure. */
#if SCHED_SETAFFINITY_ARGS == 2
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif /* HAVE_SCHED_SETAFFINITY */
}
305
/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

/*
 * Return wall-clock time in microseconds since the Unix epoch; aborts if
 * gettimeofday() fails.
 *
 * Fix: requires <sys/time.h> (struct timeval, gettimeofday), which this
 * file previously never included and only got transitively on some libcs.
 *
 * NOTE(review): gettimeofday() is not monotonic, so intervals measured with
 * this can be perturbed by clock adjustments -- see the comment above.
 */
long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}
318
/*
 * Per-thread variables: each value gets its own cache line to avoid false
 * sharing between threads.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

/* Set every thread's copy of "name" to v. */
#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)

/*
 * CPU traversal primitives.  There is no real hotplug here, so "possible"
 * and "online" are the same thing.
 */

#ifndef NR_CPUS
#define NR_CPUS 16
#endif /* #ifndef NR_CPUS */

#define for_each_possible_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)
#define for_each_online_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)
352
353/*
354 * Per-CPU variables.
355 */
356
357#define DEFINE_PER_CPU(type, name) \
358 struct { \
359 __typeof__(type) v \
06f22bdb 360 __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
0578089f
PM
361 } __per_cpu_##name[NR_CPUS]
362#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)
363
364DEFINE_PER_THREAD(int, smp_processor_id);
365
366#define per_cpu(name, thread) __per_cpu_##name[thread].v
367#define __get_cpu_var(name) per_cpu(name, smp_processor_id())
368
369#define init_per_cpu(name, v) \
370 do { \
371 int __i_p_c_i; \
372 for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
373 per_cpu(name, __i_p_c_i) = v; \
374 } while (0)
375
376/*
377 * CPU state checking (crowbarred).
378 */
379
380#define idle_cpu(cpu) 0
381#define in_softirq() 1
382#define hardirq_count() 0
383#define PREEMPT_SHIFT 0
384#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
385#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
386#define PREEMPT_BITS 8
387#define SOFTIRQ_BITS 8
388
/*
 * CPU hotplug: notifier-chain types and event constants, kept for source
 * compatibility with Linux-kernel code.  Nothing here is ever invoked in
 * this userspace harness.
 */

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
	int priority;
};

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */

/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

/* Notifier-chain return values. */
#define NOTIFY_DONE		0x0000 /* Don't care */
#define NOTIFY_OK		0x0001 /* Suits me */
#define NOTIFY_STOP_MASK	0x8000 /* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)
					/* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)
440
/*
 * Bug checks.
 */

/*
 * Linux-kernel BUG_ON() semantics: abort when the condition is TRUE.
 * Fix: the original tested !(c), i.e. assert() semantics, which inverted
 * every caller's check (aborting on the healthy path).
 */
#define BUG_ON(c) do { if (c) abort(); } while (0)
447/*
448 * Initialization -- Must be called before calling any primitives.
449 */
450
451static void smp_init(void)
452{
453 int i;
454
455 spin_lock_init(&__thread_id_map_mutex);
456 __thread_id_map[0] = pthread_self();
457 for (i = 1; i < NR_THREADS; i++)
458 __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
459 init_per_thread(smp_processor_id, 0);
460 if (pthread_key_create(&thread_id_key, NULL) != 0) {
461 perror("pthread_key_create");
462 exit(-1);
463 }
464}
465
1a43bbd8 466#endif
This page took 0.043938 seconds and 4 git commands to generate.