Rename all arch primitives with prefix caa_
[urcu.git] / tests / api_ppc.h
/* MECHANICALLY GENERATED, DO NOT EDIT!!! */

#ifndef _INCLUDE_API_H
#define _INCLUDE_API_H

/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel. For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */

#include <urcu/arch.h>

#ifndef __always_inline
#define __always_inline inline
#endif

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
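
/*
 * Usage sketch (illustrative, not part of the original API): BUILD_BUG_ON()
 * turns a compile-time-constant condition into a build failure by sizing an
 * array negatively when the condition is non-zero, e.g.:
 *
 *	BUILD_BUG_ON(sizeof(long) < sizeof(int));	breaks the build if true
 *
 * BUILD_BUG_ON_ZERO(e) is the expression form: it evaluates to 0 when @e is
 * false, so it can be embedded inside other constant expressions.
 */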

#ifdef __ASSEMBLY__
# define stringify_in_c(...)	__VA_ARGS__
# define ASM_CONST(x)		x
#else
/* This version of stringify will deal with commas... */
# define __stringify_in_c(...)	#__VA_ARGS__
# define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
# define __ASM_CONST(x)		x##UL
# define ASM_CONST(x)		__ASM_CONST(x)
#endif
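
/*
 * Usage sketch (illustrative): stringify_in_c() turns an assembly mnemonic,
 * commas included, into a C string usable in inline asm. A hypothetical
 * expansion:
 *
 *	stringify_in_c(lwarx %0,0,%2)   expands to   "lwarx %0,0,%2 "
 */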

/*
 * arch-ppc64.h: Expose PowerPC atomic instructions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel. For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */

/*
 * Machine parameters.
 */

#define CONFIG_PPC64

/* #define CAA_CACHE_LINE_SIZE 128 */
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << 7)))

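/*
 * Usage sketch (illustrative): the alignment attribute above pads hot,
 * independently written fields onto separate 128-byte cache lines to avoid
 * false sharing between CPUs, e.g.:
 *
 *	struct stats {
 *		long reads ____cacheline_internodealigned_in_smp;
 *		long writes ____cacheline_internodealigned_in_smp;
 *	};
 */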

#if 0 /* duplicate with arch_atomic.h */

/*
 * Atomic data structure, initialization, and access.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))

/*
 * Atomic operations.
 */

#define LWSYNC lwsync
#define PPC405_ERR77(ra,rb)
#ifdef CONFIG_SMP
# define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n"
# define ISYNC_ON_SMP "\n\tisync\n"
#else
# define LWSYNC_ON_SMP
# define ISYNC_ON_SMP
#endif

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x) \
	({ \
		__typeof__(*(ptr)) _x_ = (x); \
		(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
	})

#define xchg_local(ptr,x) \
	({ \
		__typeof__(*(ptr)) _x_ = (x); \
		(__typeof__(*(ptr))) __xchg_local((ptr), \
				(unsigned long)_x_, sizeof(*(ptr))); \
	})
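
/*
 * Usage sketch (illustrative; this whole block is compiled out by the
 * #if 0 above): xchg() atomically stores a new value and returns the old
 * one, selecting the 4- or 8-byte variant from sizeof(*ptr); any other
 * size fails at link time via __xchg_called_with_bad_pointer().
 *
 *	static unsigned int lock_word;
 *
 *	if (xchg(&lock_word, 1) == 0) {
 *		... we won the test-and-set ...
 *	}
 */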

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG 1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
		unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n) \
	({ \
		__typeof__(*(ptr)) _o_ = (o); \
		__typeof__(*(ptr)) _n_ = (n); \
		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				(unsigned long)_n_, sizeof(*(ptr))); \
	})


#define cmpxchg_local(ptr, o, n) \
	({ \
		__typeof__(*(ptr)) _o_ = (o); \
		__typeof__(*(ptr)) _n_ = (n); \
		(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
				(unsigned long)_n_, sizeof(*(ptr))); \
	})
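
/*
 * Usage sketch (illustrative): the classic compare-and-swap retry loop,
 * here incrementing a shared counter without a lock.
 *
 *	static unsigned int counter;
 *	unsigned int old;
 *
 *	do {
 *		old = counter;
 *	} while (cmpxchg(&counter, old, old + 1) != old);
 */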

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN	0
#define NET_SKB_PAD	L1_CACHE_BYTES

#define cmpxchg64(ptr, o, n) \
	({ \
		BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
		cmpxchg((ptr), (o), (n)); \
	})
#define cmpxchg64_local(ptr, o, n) \
	({ \
		BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
		cmpxchg_local((ptr), (o), (n)); \
	})
#endif

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add - add integer to atomic variable
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v.
 */
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0 \n\
	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @a: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @a from @v.
 */
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	"1:	lwarx	%0,0,%3		# atomic_sub \n\
	subf	%0,%2,%0 \n\
	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	"lwsync\n\
1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n\
	stwcx.	%0,0,%2 \n\
	bne-	1b \n\
	isync"
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @a: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @a from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int a, atomic_t *v)
{
	return atomic_sub_return(a, v) == 0;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	atomic_add(1, v);
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	atomic_sub(1, v);
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	return atomic_sub_and_test(1, v);
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	return atomic_inc_return(v) == 0;
}

/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @a: integer value to add
 *
 * Atomically adds @a to @v and returns @a + @v
 */
static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	"lwsync \n\
1:	lwarx	%0,0,%2		# atomic_add_return \n\
	add	%0,%1,%0 \n\
	stwcx.	%0,0,%2 \n\
	bne-	1b \n\
	isync"
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @a: integer value to add
 *
 * Atomically adds @a to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int a, atomic_t *v)
{
	return atomic_add_return(a, v) < 0;
}

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__(
	"lwsync \n\
1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n\
	stwcx.	%0,0,%1 \n\
	bne-	1b \n\
	isync \n\
	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}
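
/*
 * Usage sketch (illustrative): the canonical use is taking a reference only
 * while the object is still live, i.e. refusing to resurrect a refcount
 * that has already hit zero; atomic_inc_not_zero() below wraps exactly this
 * pattern.
 *
 *	if (!atomic_add_unless(&obj->refcnt, 1, 0))
 *		return NULL;	object already on its way out
 */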

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))

/* Atomic operations do not imply ordering on PowerPC, so these map to
   full memory barriers. */
#define smp_mb__before_atomic_dec() cmm_smp_mb()
#define smp_mb__after_atomic_dec() cmm_smp_mb()
#define smp_mb__before_atomic_inc() cmm_smp_mb()
#define smp_mb__after_atomic_inc() cmm_smp_mb()

#endif //0 /* duplicate with arch_atomic.h */

/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version. However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/time.h>	/* for gettimeofday() in get_microseconds() */
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */

/*
 * Default machine parameters.
 */

#ifndef CAA_CACHE_LINE_SIZE
/* #define CAA_CACHE_LINE_SIZE 128 */
#endif /* #ifndef CAA_CACHE_LINE_SIZE */

/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER

static void spin_lock_init(spinlock_t *sp)
{
	if (pthread_mutex_init(sp, NULL) != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}

static void spin_lock(spinlock_t *sp)
{
	if (pthread_mutex_lock(sp) != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}

static void spin_unlock(spinlock_t *sp)
{
	if (pthread_mutex_unlock(sp) != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}

#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
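
/*
 * Usage sketch (illustrative): the kernel-style spinlock API maps onto a
 * pthread mutex, so the usual pairing applies unchanged; the irqsave
 * variants merely simulate the flags argument, since there are no
 * interrupts to disable in userspace.
 *
 *	DEFINE_SPINLOCK(mylock);
 *
 *	spin_lock(&mylock);
 *	... critical section ...
 *	spin_unlock(&mylock);
 */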

/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY 0
#define __THREAD_ID_MAP_WAITING 1
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;

#define for_each_thread(t) \
	for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
		    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

#define for_each_tid(t, tid) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
		    ((tid) != __THREAD_ID_MAP_WAITING))

pthread_key_t thread_id_key;

static int __smp_thread_id(void)
{
	int i;
	thread_id_t tid = pthread_self();

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			long v = i + 1;	/* must be non-NULL. */

			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
				perror("pthread_setspecific");
				exit(-1);
			}
			return i;
		}
	}
	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			spin_unlock(&__thread_id_map_mutex);
			return i;
		}
	}
	spin_unlock(&__thread_id_map_mutex);
	fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
		(int)tid, (int)tid);
	exit(-1);
}

static int smp_thread_id(void)
{
	void *id;

	id = pthread_getspecific(thread_id_key);
	if (id == NULL)
		return __smp_thread_id();
	return (long)id - 1;
}

static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
	thread_id_t tid;
	int i;

	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
			break;
	}
	if (i >= NR_THREADS) {
		spin_unlock(&__thread_id_map_mutex);
		fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_WAITING;
	spin_unlock(&__thread_id_map_mutex);
	if (pthread_create(&tid, NULL, func, arg) != 0) {
		perror("create_thread:pthread_create");
		exit(-1);
	}
	__thread_id_map[i] = tid;
	return tid;
}

static void *wait_thread(thread_id_t tid)
{
	int i;
	void *vp;

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid)
			break;
	}
	if (i >= NR_THREADS) {
		fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
			(int)tid, (int)tid);
		exit(-1);
	}
	if (pthread_join(tid, &vp) != 0) {
		perror("wait_thread:pthread_join");
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	return vp;
}

static void wait_all_threads(void)
{
	int i;
	thread_id_t tid;

	for (i = 1; i < NR_THREADS; i++) {
		tid = __thread_id_map[i];
		if (tid != __THREAD_ID_MAP_EMPTY &&
		    tid != __THREAD_ID_MAP_WAITING)
			(void)wait_thread(tid);
	}
}
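
/*
 * Usage sketch (illustrative, with a hypothetical worker function):
 * smp_init() below must run first so that thread slot 0 and the TLS key
 * exist before any of these primitives are used.
 *
 *	void *worker(void *arg)
 *	{
 *		printf("hello from thread %d\n", smp_thread_id());
 *		return NULL;
 *	}
 *
 *	smp_init();
 *	create_thread(worker, NULL);
 *	create_thread(worker, NULL);
 *	wait_all_threads();
 */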

static void run_on(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	sched_setaffinity(0, sizeof(mask), &mask);
}

/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}

/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)
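
/*
 * Usage sketch (illustrative): one cache-line-aligned slot per thread
 * avoids false sharing on hot counters, which can then be summed at the
 * end of a run.
 *
 *	DEFINE_PER_THREAD(long long, n_reads);
 *
 *	init_per_thread(n_reads, 0LL);
 *	__get_thread_var(n_reads)++;	in each worker thread
 *
 *	long long sum = 0;
 *	int t;
 *
 *	for_each_thread(t)
 *		sum += per_thread(n_reads, t);
 */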

/*
 * CPU traversal primitives.
 */

#ifndef NR_CPUS
#define NR_CPUS 16
#endif /* #ifndef NR_CPUS */

#define for_each_possible_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)
#define for_each_online_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)

/*
 * Per-CPU variables.
 */

#define DEFINE_PER_CPU(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
	} __per_cpu_##name[NR_CPUS]
#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)

DEFINE_PER_THREAD(int, smp_processor_id);

#define per_cpu(name, thread) __per_cpu_##name[thread].v
#define __get_cpu_var(name) per_cpu(name, smp_processor_id())

#define init_per_cpu(name, v) \
	do { \
		int __i_p_c_i; \
		for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
			per_cpu(name, __i_p_c_i) = v; \
	} while (0)

/*
 * CPU state checking (crowbarred).
 */

#define idle_cpu(cpu) 0
#define in_softirq() 1
#define hardirq_count() 0
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8

/*
 * CPU hotplug.
 */

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
	int priority;
};

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

#define NOTIFY_DONE		0x0000 /* Don't care */
#define NOTIFY_OK		0x0001 /* Suits me */
#define NOTIFY_STOP_MASK	0x8000 /* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)
					/* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)

/*
 * Bug checks.
 */

#define BUG_ON(c) do { if (c) abort(); } while (0)

/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
	int i;

	spin_lock_init(&__thread_id_map_mutex);
	__thread_id_map[0] = pthread_self();
	for (i = 1; i < NR_THREADS; i++)
		__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	init_per_thread(smp_processor_id, 0);
	if (pthread_key_create(&thread_id_key, NULL) != 0) {
		perror("pthread_key_create");
		exit(-1);
	}
}

/* Taken from the Linux kernel source tree, so GPLv2-only!!! */

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

#if 0

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
		       struct list_head *prev,
		       struct list_head *next);
#endif

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}


/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}
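
/*
 * Usage sketch (illustrative; this block too is compiled out by the
 * #if 0 above): embedding a list_head makes any struct a list node, and
 * the insertion point decides stack vs. queue behaviour.
 *
 *	struct item {
 *		int value;
 *		struct list_head node;
 *	};
 *
 *	LIST_HEAD(items);
 *	struct item a;
 *
 *	list_add(&a.node, &items);		push at head: LIFO order
 *	(or) list_add_tail(&a.node, &items);	append at tail: FIFO order
 */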

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif

/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
				     struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}

/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
			       const struct list_head *head)
{
	return list->next == head;
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}

/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
 */
static inline int list_is_singular(const struct list_head *head)
{
	return !list_empty(head) && (head->next == head->prev);
}

static inline void __list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	struct list_head *new_first = entry->next;
	list->next = head->next;
	list->next->prev = list;
	list->prev = entry;
	entry->next = list;
	head->next = new_first;
	new_first->prev = head;
}

/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *	and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 *
 */
static inline void list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	if (list_empty(head))
		return;
	if (list_is_singular(head) &&
		(head->next != entry && head != entry))
		return;
	if (entry == head)
		INIT_LIST_HEAD(list);
	else
		__list_cut_position(list, head, entry);
}

static inline void __list_splice(const struct list_head *list,
				 struct list_head *prev,
				 struct list_head *next)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 * list_splice - join two lists, this is designed for stacks
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(const struct list_head *list,
			       struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head, head->next);
}

/**
 * list_splice_tail - join two lists, each list being a queue
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice_tail(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head->prev, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head, head->next);
		INIT_LIST_HEAD(list);
	}
}

/**
 * list_splice_tail_init - join two lists and reinitialise the emptied list
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void list_splice_tail_init(struct list_head *list,
					 struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head->prev, head);
		INIT_LIST_HEAD(list);
	}
}

/**
 * list_entry - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	caa_container_of(ptr, type, member)

/**
 * list_first_entry - get the first element from a list
 * @ptr:	the list head to take the element from.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty.
 */
#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)

/**
 * list_for_each - iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
		pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
		pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)

/**
 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_prev_safe(pos, n, head) \
	for (pos = (head)->prev, n = pos->prev; \
	     prefetch(pos->prev), pos != (head); \
	     pos = n, n = pos->prev)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
	     prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member); \
	     prefetch(pos->member.prev), &pos->member != (head); \
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos:	the type * to use as a start point
 * @head:	the head of the list
 * @member:	the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) \
	for (pos = list_entry(pos->member.next, typeof(*pos), member); \
	     prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_reverse - iterate backwards from the given point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Start to iterate over list of given type backwards, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_reverse(pos, head, member) \
	for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
	     prefetch(pos->member.prev), &pos->member != (head); \
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) \
	for (; prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_continue
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
	for (pos = list_entry(pos->member.next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
	for (n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member), \
		n = list_entry(pos->member.prev, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))

#endif //0

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
				    struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
				   struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if (next->next)
		next->next->pprev = &next->next;
}

/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	new->first = old->first;
	if (new->first)
		new->first->pprev = &new->first;
	old->first = NULL;
}

#define hlist_entry(ptr, type, member) caa_container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
	for (pos = (head)->first; \
	     pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
	for (pos = (pos)->next; \
	     pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
	for (; pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @n:		another &struct hlist_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
	for (pos = (head)->first; \
	     pos && ({ n = pos->next; 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
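
/*
 * Usage sketch (illustrative, with hypothetical names): single-pointer
 * hlist heads halve the size of hash-table bucket arrays, at the cost of
 * O(n) access to the tail.
 *
 *	struct obj {
 *		int key;
 *		struct hlist_node hash_node;
 *	};
 *
 *	static struct hlist_head buckets[64];
 *	struct obj *tpos;
 *	struct hlist_node *pos;
 *
 *	hlist_for_each_entry(tpos, pos, &buckets[key & 63], hash_node)
 *		if (tpos->key == key)
 *			return tpos;
 */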

#endif

#endif