/* MECHANICALLY GENERATED, DO NOT EDIT!!! */

#define _INCLUDE_API_H

/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */

#ifndef __always_inline
#define __always_inline inline
#endif

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
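
/*
 * Both checks expand to an array type whose size goes negative when the
 * condition fires, turning a failed check into a compile-time error.
 * A minimal usage sketch:
 *
 *	BUILD_BUG_ON(sizeof(long) < sizeof(void *));
 */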

#ifdef __ASSEMBLY__
# define stringify_in_c(...) __VA_ARGS__
# define ASM_CONST(x) x
#else
/* This version of stringify will deal with commas... */
# define __stringify_in_c(...) #__VA_ARGS__
# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
# define __ASM_CONST(x) x##UL
# define ASM_CONST(x) __ASM_CONST(x)
#endif


/*
 * arch-i386.h: Expose x86 atomic instructions.  80486 and better only.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, but version 2 only due to inclusion
 * of Linux-kernel code.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */

/*
 * Machine parameters.
 */

#define CACHE_LINE_SIZE 64
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << 6)))

#define LOCK_PREFIX "lock ; "

/*
 * Atomic data structure, initialization, and access.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i) { (i) }

#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))

/*
 * Atomic operations.
 */

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "incl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns the new value of @v
 * (that is, @i plus the old value of @v).
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i;

	__i = i;
	/* xaddl leaves the pre-add value of v->counter in i. */
	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
	/* Add the original addend back to recover the post-add value. */
	return i + __i;
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}
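
/*
 * Example (illustrative sketch, not part of the generated API): because
 * atomic_add_return() returns the post-add value, a counter that reports
 * the updated total is simply:
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	static int record_hit(void)
 *	{
 *		return atomic_add_return(1, &hits);
 *	}
 */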

static inline unsigned long
cmpxchg(volatile long *ptr, long oldval, long newval)
{
	unsigned long retval;

	asm("# cmpxchg\n"
	    "lock; cmpxchgl %4,(%2)\n"
	    "# end atomic_cmpxchg4"
	    : "=a" (retval), "=m" (*ptr)
	    : "r" (ptr), "0" (oldval), "r" (newval), "m" (*ptr)
	    : "cc");
	return retval;
}

#define atomic_cmpxchg(v, old, new) \
	((int)cmpxchg((volatile long *)&((v)->counter), old, new))
/* Note: relies on an xchg() definition provided elsewhere. */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
/* Fallbacks in case the including code has not defined likely()/unlikely(). */
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#define likely(x) __builtin_expect(!!(x), 1)
#endif
#define atomic_add_unless(v, a, u) \
({ \
	int c, old; \
	c = atomic_read(v); \
	for (;;) { \
		if (unlikely(c == (u))) \
			break; \
		old = atomic_cmpxchg((v), c, c + (a)); \
		if (likely(old == c)) \
			break; \
		c = old; \
	} \
	c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
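
/*
 * Example (sketch only; "struct obj" is hypothetical): take a reference
 * only while the object is still live, i.e. its count has not hit zero.
 *
 *	struct obj {
 *		atomic_t refcnt;
 *	};
 *
 *	static int obj_tryget(struct obj *p)
 *	{
 *		return atomic_inc_not_zero(&p->refcnt);
 *	}
 */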

#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask), "m" (*(addr)) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.  However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>	/* for offsetof(), used by container_of() */
#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/time.h>	/* for gettimeofday() */
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */

/*
 * Compiler magic.
 */
#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
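
/*
 * Example (sketch only; "struct counter" is hypothetical): recover a
 * pointer to the enclosing structure from a pointer to one of its members.
 *
 *	struct counter {
 *		spinlock_t lock;
 *		int value;
 *	};
 *
 *	static struct counter *counter_of(spinlock_t *sp)
 *	{
 *		return container_of(sp, struct counter, lock);
 *	}
 */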

/*
 * Default machine parameters.
 */

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 128
#endif /* #ifndef CACHE_LINE_SIZE */

/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER

static void spin_lock_init(spinlock_t *sp)
{
	if (pthread_mutex_init(sp, NULL) != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}

static void spin_lock(spinlock_t *sp)
{
	if (pthread_mutex_lock(sp) != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}

static void spin_unlock(spinlock_t *sp)
{
	if (pthread_mutex_unlock(sp) != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}

#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)

/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY 0
#define __THREAD_ID_MAP_WAITING 1
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;

#define for_each_thread(t) \
	for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
		    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

pthread_key_t thread_id_key;

static int __smp_thread_id(void)
{
	int i;
	thread_id_t tid = pthread_self();

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			long v = i + 1;  /* must be non-NULL. */

			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
				perror("pthread_setspecific");
				exit(-1);
			}
			return i;
		}
	}
	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			/* Drop the lock before returning the match. */
			spin_unlock(&__thread_id_map_mutex);
			return i;
		}
	}
	spin_unlock(&__thread_id_map_mutex);
	fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
		(int)tid, (int)tid);
	exit(-1);
}

static int smp_thread_id(void)
{
	void *id;

	id = pthread_getspecific(thread_id_key);
	if (id == NULL)
		return __smp_thread_id();
	/* Undo the +1 bias without doing arithmetic on a void pointer. */
	return (long)id - 1;
}

static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
	thread_id_t tid;
	int i;

	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
			break;
	}
	if (i >= NR_THREADS) {
		spin_unlock(&__thread_id_map_mutex);
		fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_WAITING;
	spin_unlock(&__thread_id_map_mutex);
	if (pthread_create(&tid, NULL, func, arg) != 0) {
		perror("create_thread:pthread_create");
		exit(-1);
	}
	__thread_id_map[i] = tid;
	return tid;
}

static void *wait_thread(thread_id_t tid)
{
	int i;
	void *vp;

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid)
			break;
	}
	if (i >= NR_THREADS) {
		fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
			(int)tid, (int)tid);
		exit(-1);
	}
	if (pthread_join(tid, &vp) != 0) {
		perror("wait_thread:pthread_join");
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	return vp;
}

static void wait_all_threads(void)
{
	int i;
	thread_id_t tid;

	for (i = 1; i < NR_THREADS; i++) {
		tid = __thread_id_map[i];
		if (tid != __THREAD_ID_MAP_EMPTY &&
		    tid != __THREAD_ID_MAP_WAITING)
			(void)wait_thread(tid);
	}
}
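
/*
 * Example (sketch only; "worker" is hypothetical, and smp_init(), defined
 * below, must run first): spawn workers, then reap them all.
 *
 *	static void *worker(void *arg)
 *	{
 *		printf("thread %d running\n", smp_thread_id());
 *		return NULL;
 *	}
 *
 *	smp_init();
 *	create_thread(worker, NULL);
 *	create_thread(worker, NULL);
 *	wait_all_threads();
 */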

static void run_on(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	sched_setaffinity(0, sizeof(mask), &mask);
}

/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}
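
/*
 * A monotonic variant along the lines the comment above suggests (sketch
 * only; needs <time.h>, and -lrt on older glibc):
 *
 *	long long get_microseconds_monotonic(void)
 *	{
 *		struct timespec ts;
 *
 *		if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
 *			abort();
 *		return (long long)ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
 *	}
 */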

/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)
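
/*
 * Example (sketch only; "nops" is a hypothetical statistic): each slot is
 * cache-line aligned, so threads can bump their own counter without false
 * sharing, and a reader can sum across slots afterwards.
 *
 *	DEFINE_PER_THREAD(long, nops);
 *
 *	init_per_thread(nops, 0);
 *	__get_thread_var(nops)++;	(executed in each worker)
 *
 *	long sum = 0;
 *	int t;
 *	for_each_thread(t)
 *		sum += per_thread(nops, t);
 */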

/*
 * CPU traversal primitives.
 */

#ifndef NR_CPUS
#define NR_CPUS 16
#endif /* #ifndef NR_CPUS */

#define for_each_possible_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)
#define for_each_online_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)

/*
 * Per-CPU variables.
 */

#define DEFINE_PER_CPU(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
	} __per_cpu_##name[NR_CPUS]
#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)

DEFINE_PER_THREAD(int, smp_processor_id);

#define per_cpu(name, cpu) __per_cpu_##name[cpu].v
#define __get_cpu_var(name) per_cpu(name, smp_processor_id())

#define init_per_cpu(name, v) \
	do { \
		int __i_p_c_i; \
		for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
			per_cpu(name, __i_p_c_i) = v; \
	} while (0)

/*
 * CPU state checking (crowbarred).
 */

#define idle_cpu(cpu) 0
#define in_softirq() 1
#define hardirq_count() 0
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8

/*
 * CPU hotplug.
 */

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
	int priority;
};

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

#define NOTIFY_DONE		0x0000 /* Don't care */
#define NOTIFY_OK		0x0001 /* Suits me */
#define NOTIFY_STOP_MASK	0x8000 /* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)
					/* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)

/*
 * Bug checks.
 */

/* Abort when the condition holds, matching kernel BUG_ON() semantics. */
#define BUG_ON(c) do { if (c) abort(); } while (0)

/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
	int i;

	spin_lock_init(&__thread_id_map_mutex);
	__thread_id_map[0] = pthread_self();
	for (i = 1; i < NR_THREADS; i++)
		__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	init_per_thread(smp_processor_id, 0);
	if (pthread_key_create(&thread_id_key, NULL) != 0) {
		perror("pthread_key_create");
		exit(-1);
	}
}

/* Taken from the Linux kernel source tree, so GPLv2-only!!! */

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

/* Fallback in case the including code provides no prefetch(). */
#ifndef prefetch
#define prefetch(x) __builtin_prefetch(x)
#endif

#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
		       struct list_head *prev,
		       struct list_head *next);
#endif

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}


/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif

/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
				     struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}

/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
			       const struct list_head *head)
{
	return list->next == head;
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}

/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
 */
static inline int list_is_singular(const struct list_head *head)
{
	return !list_empty(head) && (head->next == head->prev);
}

static inline void __list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	struct list_head *new_first = entry->next;
	list->next = head->next;
	list->next->prev = list;
	list->prev = entry;
	entry->next = list;
	head->next = new_first;
	new_first->prev = head;
}

/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *	and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 *
 */
static inline void list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	if (list_empty(head))
		return;
	if (list_is_singular(head) &&
		(head->next != entry && head != entry))
		return;
	if (entry == head)
		INIT_LIST_HEAD(list);
	else
		__list_cut_position(list, head, entry);
}

static inline void __list_splice(const struct list_head *list,
				 struct list_head *prev,
				 struct list_head *next)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 * list_splice - join two lists, this is designed for stacks
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(const struct list_head *list,
			       struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head, head->next);
}

/**
 * list_splice_tail - join two lists, each list being a queue
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice_tail(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head->prev, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head, head->next);
		INIT_LIST_HEAD(list);
	}
}

/**
 * list_splice_tail_init - join two lists and reinitialise the emptied list
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void list_splice_tail_init(struct list_head *list,
					 struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head->prev, head);
		INIT_LIST_HEAD(list);
	}
}

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/**
 * list_first_entry - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * Note that the list is expected to be non-empty.
 */
#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
		pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
		pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)

/**
 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_prev_safe(pos, n, head) \
	for (pos = (head)->prev, n = pos->prev; \
	     prefetch(pos->prev), pos != (head); \
	     pos = n, n = pos->prev)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
	     prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

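/*
 * Example (sketch only; "struct foo" is hypothetical): walk every element,
 * letting the macro do the container_of() arithmetic on the "node" member.
 *
 *	struct foo {
 *		int x;
 *		struct list_head node;
 *	};
 *	LIST_HEAD(foo_list);
 *	struct foo *f;
 *
 *	list_for_each_entry(f, &foo_list, node)
 *		printf("%d\n", f->x);
 */
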
/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member); \
	     prefetch(pos->member.prev), &pos->member != (head); \
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) \
	for (pos = list_entry(pos->member.next, typeof(*pos), member); \
	     prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_reverse - iterate backwards from the given point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Start to iterate over list of given type backwards, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_reverse(pos, head, member) \
	for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
	     prefetch(pos->member.prev), &pos->member != (head); \
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) \
	for (; prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

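/*
 * Example (continuing the sketch above): the _safe variant caches the next
 * element in @n, so the current entry may be deleted, and even freed,
 * while iterating.
 *
 *	struct foo *f, *tmp;
 *
 *	list_for_each_entry_safe(f, tmp, &foo_list, node) {
 *		list_del(&f->node);
 *		free(f);
 *	}
 */
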
/**
 * list_for_each_entry_safe_continue
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
	for (pos = list_entry(pos->member.next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
	for (n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member), \
		n = list_entry(pos->member.prev, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))

/*
 * Doubly linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two-pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
				    struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
				   struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if (next->next)
		next->next->pprev = &next->next;
}

/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	new->first = old->first;
	if (new->first)
		new->first->pprev = &new->first;
	old->first = NULL;
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
	for (pos = (head)->first; \
	     pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

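/*
 * Example (sketch only; a hypothetical fixed-size hash table, with "key"
 * the lookup key): hlist heads are a single pointer, so the bucket array
 * is half the size it would be with struct list_head buckets.
 *
 *	struct item {
 *		int key;
 *		struct hlist_node hash;
 *	};
 *	struct hlist_head bucket[16];
 *	struct hlist_node *p;
 *	struct item *it;
 *
 *	hlist_for_each_entry(it, p, &bucket[key & 15], hash)
 *		if (it->key == key)
 *			break;
 */
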
/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
	for (pos = (pos)->next; \
	     pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
	for (; pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
	for (pos = (head)->first; \
	     pos && ({ n = pos->next; 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)

#endif