1 /* MECHANICALLY GENERATED, DO NOT EDIT!!! */
2
3 #define _INCLUDE_API_H
4
5 /*
6 * common.h: Common Linux kernel-isms.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; but version 2 of the License only due
11 * to code included from the Linux kernel.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * Copyright (c) 2006 Paul E. McKenney, IBM.
23 *
24 * Much code taken from the Linux kernel. For such code, the option
25 * to redistribute under later versions of GPL might not be available.
26 */
27
28 #ifndef __always_inline
29 #define __always_inline inline
30 #endif
31
32 #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
33 #define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
34
35 #ifdef __ASSEMBLY__
36 # define stringify_in_c(...) __VA_ARGS__
37 # define ASM_CONST(x) x
38 #else
39 /* This version of stringify will deal with commas... */
40 # define __stringify_in_c(...) #__VA_ARGS__
41 # define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
42 # define __ASM_CONST(x) x##UL
43 # define ASM_CONST(x) __ASM_CONST(x)
44 #endif
45
46
47 /*
48 * arch-ppc64.h: Expose PowerPC atomic instructions.
49 *
50 * This program is free software; you can redistribute it and/or modify
51 * it under the terms of the GNU General Public License as published by
52 * the Free Software Foundation; but version 2 of the License only due
53 * to code included from the Linux kernel.
54 *
55 * This program is distributed in the hope that it will be useful,
56 * but WITHOUT ANY WARRANTY; without even the implied warranty of
57 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
58 * GNU General Public License for more details.
59 *
60 * You should have received a copy of the GNU General Public License
61 * along with this program; if not, write to the Free Software
62 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
63 *
64 * Copyright (c) 2006 Paul E. McKenney, IBM.
65 *
66 * Much code taken from the Linux kernel. For such code, the option
67 * to redistribute under later versions of GPL might not be available.
68 */
69
70 /*
71 * Machine parameters.
72 */
73
74 #define CONFIG_PPC64
75
76 #define CACHE_LINE_SIZE 128
77 #define ____cacheline_internodealigned_in_smp \
78 __attribute__((__aligned__(1 << 7)))
79
80 /*
81 * Atomic data structure, initialization, and access.
82 */
83
84 typedef struct { volatile int counter; } atomic_t;
85
86 #define ATOMIC_INIT(i) { (i) }
87
88 #define atomic_read(v) ((v)->counter)
89 #define atomic_set(v, i) (((v)->counter) = (i))
90
91 /*
92 * Atomic operations.
93 */
94
95 #define LWSYNC lwsync
96 #define PPC405_ERR77(ra,rb)
97 #ifdef CONFIG_SMP
98 # define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n"
99 # define ISYNC_ON_SMP "\n\tisync\n"
100 #else
101 # define LWSYNC_ON_SMP
102 # define ISYNC_ON_SMP
103 #endif
104
105
106 /*
107 * Atomic exchange
108 *
109 * Changes the memory location '*ptr' to be val and returns
110 * the previous value stored there.
111 */
112 static __always_inline unsigned long
113 __xchg_u32(volatile void *p, unsigned long val)
114 {
115 unsigned long prev;
116
117 __asm__ __volatile__(
118 LWSYNC_ON_SMP
119 "1: lwarx %0,0,%2 \n"
120 PPC405_ERR77(0,%2)
121 " stwcx. %3,0,%2 \n\
122 bne- 1b"
123 ISYNC_ON_SMP
124 : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
125 : "r" (p), "r" (val)
126 : "cc", "memory");
127
128 return prev;
129 }
130
131 /*
132 * Atomic exchange
133 *
134 * Changes the memory location '*ptr' to be val and returns
135 * the previous value stored there.
136 */
137 static __always_inline unsigned long
138 __xchg_u32_local(volatile void *p, unsigned long val)
139 {
140 unsigned long prev;
141
142 __asm__ __volatile__(
143 "1: lwarx %0,0,%2 \n"
144 PPC405_ERR77(0,%2)
145 " stwcx. %3,0,%2 \n\
146 bne- 1b"
147 : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
148 : "r" (p), "r" (val)
149 : "cc", "memory");
150
151 return prev;
152 }
153
154 #ifdef CONFIG_PPC64
155 static __always_inline unsigned long
156 __xchg_u64(volatile void *p, unsigned long val)
157 {
158 unsigned long prev;
159
160 __asm__ __volatile__(
161 LWSYNC_ON_SMP
162 "1: ldarx %0,0,%2 \n"
163 PPC405_ERR77(0,%2)
164 " stdcx. %3,0,%2 \n\
165 bne- 1b"
166 ISYNC_ON_SMP
167 : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
168 : "r" (p), "r" (val)
169 : "cc", "memory");
170
171 return prev;
172 }
173
174 static __always_inline unsigned long
175 __xchg_u64_local(volatile void *p, unsigned long val)
176 {
177 unsigned long prev;
178
179 __asm__ __volatile__(
180 "1: ldarx %0,0,%2 \n"
181 PPC405_ERR77(0,%2)
182 " stdcx. %3,0,%2 \n\
183 bne- 1b"
184 : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
185 : "r" (p), "r" (val)
186 : "cc", "memory");
187
188 return prev;
189 }
190 #endif
191
192 /*
193 * This function doesn't exist, so you'll get a linker error
194 * if something tries to do an invalid xchg().
195 */
196 extern void __xchg_called_with_bad_pointer(void);
197
198 static __always_inline unsigned long
199 __xchg(volatile void *ptr, unsigned long x, unsigned int size)
200 {
201 switch (size) {
202 case 4:
203 return __xchg_u32(ptr, x);
204 #ifdef CONFIG_PPC64
205 case 8:
206 return __xchg_u64(ptr, x);
207 #endif
208 }
209 __xchg_called_with_bad_pointer();
210 return x;
211 }
212
213 static __always_inline unsigned long
214 __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
215 {
216 switch (size) {
217 case 4:
218 return __xchg_u32_local(ptr, x);
219 #ifdef CONFIG_PPC64
220 case 8:
221 return __xchg_u64_local(ptr, x);
222 #endif
223 }
224 __xchg_called_with_bad_pointer();
225 return x;
226 }
227 #define xchg(ptr,x) \
228 ({ \
229 __typeof__(*(ptr)) _x_ = (x); \
230 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
231 })
232
233 #define xchg_local(ptr,x) \
234 ({ \
235 __typeof__(*(ptr)) _x_ = (x); \
236 (__typeof__(*(ptr))) __xchg_local((ptr), \
237 (unsigned long)_x_, sizeof(*(ptr))); \
238 })
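/*
 * Illustrative sketch (not part of the generated header): a hypothetical
 * test-and-set flag built on xchg(), which returns the previous value of
 * the location.  The names 'flag', trylock_flag() and unlock_flag() are
 * invented for this example only.
 *
 *	static unsigned int flag;		// 0 == free, 1 == taken
 *
 *	static int trylock_flag(void)
 *	{
 *		return xchg(&flag, 1) == 0;	// 0 previously -> we got it
 *	}
 *
 *	static void unlock_flag(void)
 *	{
 *		(void)xchg(&flag, 0);
 *	}
 */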
239
240 /*
241 * Compare and exchange - if *p == old, set it to new,
242 * and return the old value of *p.
243 */
244 #define __HAVE_ARCH_CMPXCHG 1
245
246 static __always_inline unsigned long
247 __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
248 {
249 unsigned int prev;
250
251 __asm__ __volatile__ (
252 LWSYNC_ON_SMP
253 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
254 cmpw 0,%0,%3\n\
255 bne- 2f\n"
256 PPC405_ERR77(0,%2)
257 " stwcx. %4,0,%2\n\
258 bne- 1b"
259 ISYNC_ON_SMP
260 "\n\
261 2:"
262 : "=&r" (prev), "+m" (*p)
263 : "r" (p), "r" (old), "r" (new)
264 : "cc", "memory");
265
266 return prev;
267 }
268
269 static __always_inline unsigned long
270 __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
271 unsigned long new)
272 {
273 unsigned int prev;
274
275 __asm__ __volatile__ (
276 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
277 cmpw 0,%0,%3\n\
278 bne- 2f\n"
279 PPC405_ERR77(0,%2)
280 " stwcx. %4,0,%2\n\
281 bne- 1b"
282 "\n\
283 2:"
284 : "=&r" (prev), "+m" (*p)
285 : "r" (p), "r" (old), "r" (new)
286 : "cc", "memory");
287
288 return prev;
289 }
290
291 #ifdef CONFIG_PPC64
292 static __always_inline unsigned long
293 __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
294 {
295 unsigned long prev;
296
297 __asm__ __volatile__ (
298 LWSYNC_ON_SMP
299 "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
300 cmpd 0,%0,%3\n\
301 bne- 2f\n\
302 stdcx. %4,0,%2\n\
303 bne- 1b"
304 ISYNC_ON_SMP
305 "\n\
306 2:"
307 : "=&r" (prev), "+m" (*p)
308 : "r" (p), "r" (old), "r" (new)
309 : "cc", "memory");
310
311 return prev;
312 }
313
314 static __always_inline unsigned long
315 __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
316 unsigned long new)
317 {
318 unsigned long prev;
319
320 __asm__ __volatile__ (
321 "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
322 cmpd 0,%0,%3\n\
323 bne- 2f\n\
324 stdcx. %4,0,%2\n\
325 bne- 1b"
326 "\n\
327 2:"
328 : "=&r" (prev), "+m" (*p)
329 : "r" (p), "r" (old), "r" (new)
330 : "cc", "memory");
331
332 return prev;
333 }
334 #endif
335
336 /* This function doesn't exist, so you'll get a linker error
337 if something tries to do an invalid cmpxchg(). */
338 extern void __cmpxchg_called_with_bad_pointer(void);
339
340 static __always_inline unsigned long
341 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
342 unsigned int size)
343 {
344 switch (size) {
345 case 4:
346 return __cmpxchg_u32(ptr, old, new);
347 #ifdef CONFIG_PPC64
348 case 8:
349 return __cmpxchg_u64(ptr, old, new);
350 #endif
351 }
352 __cmpxchg_called_with_bad_pointer();
353 return old;
354 }
355
356 static __always_inline unsigned long
357 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
358 unsigned int size)
359 {
360 switch (size) {
361 case 4:
362 return __cmpxchg_u32_local(ptr, old, new);
363 #ifdef CONFIG_PPC64
364 case 8:
365 return __cmpxchg_u64_local(ptr, old, new);
366 #endif
367 }
368 __cmpxchg_called_with_bad_pointer();
369 return old;
370 }
371
372 #define cmpxchg(ptr, o, n) \
373 ({ \
374 __typeof__(*(ptr)) _o_ = (o); \
375 __typeof__(*(ptr)) _n_ = (n); \
376 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
377 (unsigned long)_n_, sizeof(*(ptr))); \
378 })
379
380
381 #define cmpxchg_local(ptr, o, n) \
382 ({ \
383 __typeof__(*(ptr)) _o_ = (o); \
384 __typeof__(*(ptr)) _n_ = (n); \
385 (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
386 (unsigned long)_n_, sizeof(*(ptr))); \
387 })
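/*
 * Illustrative sketch (not part of the generated header): the usual
 * cmpxchg() retry loop, shown here recording a running maximum.  The
 * name record_max() and the variable 'max_seen' are invented for this
 * example only.
 *
 *	static unsigned long max_seen;
 *
 *	static void record_max(unsigned long v)
 *	{
 *		unsigned long old;
 *
 *		do {
 *			old = max_seen;
 *			if (old >= v)
 *				return;		// nothing to update
 *		} while (cmpxchg(&max_seen, old, v) != old);
 *	}
 */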
388
389 #ifdef CONFIG_PPC64
390 /*
391 * We handle most unaligned accesses in hardware. On the other hand
392 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
393 * powers of 2 writes until it reaches sufficient alignment).
394 *
395 * Based on this we disable the IP header alignment in network drivers.
396 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
397 * cacheline alignment of buffers.
398 */
399 #define NET_IP_ALIGN 0
400 #define NET_SKB_PAD L1_CACHE_BYTES
401
402 #define cmpxchg64(ptr, o, n) \
403 ({ \
404 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
405 cmpxchg((ptr), (o), (n)); \
406 })
407 #define cmpxchg64_local(ptr, o, n) \
408 ({ \
409 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
410 cmpxchg_local((ptr), (o), (n)); \
411 })
412 #endif
413
414 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
415 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
416
417 /**
418 * atomic_add - add integer to atomic variable
419  * @a: integer value to add
420 * @v: pointer of type atomic_t
421 *
422 * Atomically adds @a to @v.
423 */
424 static __inline__ void atomic_add(int a, atomic_t *v)
425 {
426 int t;
427
428 __asm__ __volatile__(
429 "1: lwarx %0,0,%3 # atomic_add\n\
430 add %0,%2,%0 \n\
431 stwcx. %0,0,%3 \n\
432 bne- 1b"
433 : "=&r" (t), "+m" (v->counter)
434 : "r" (a), "r" (&v->counter)
435 : "cc");
436 }
437
438 /**
439  * atomic_sub - subtract integer from atomic variable
440  * @a: integer value to subtract
441 * @v: pointer of type atomic_t
442 *
443 * Atomically subtracts @a from @v.
444 */
445 static __inline__ void atomic_sub(int a, atomic_t *v)
446 {
447 int t;
448
449 __asm__ __volatile__(
450 "1: lwarx %0,0,%3 # atomic_sub \n\
451 subf %0,%2,%0 \n\
452 stwcx. %0,0,%3 \n\
453 bne- 1b"
454 : "=&r" (t), "+m" (v->counter)
455 : "r" (a), "r" (&v->counter)
456 : "cc");
457 }
458
459 static __inline__ int atomic_sub_return(int a, atomic_t *v)
460 {
461 int t;
462
463 __asm__ __volatile__(
464 "lwsync\n\
465 1: lwarx %0,0,%2 # atomic_sub_return\n\
466 subf %0,%1,%0\n\
467 stwcx. %0,0,%2 \n\
468 bne- 1b \n\
469 isync"
470 : "=&r" (t)
471 : "r" (a), "r" (&v->counter)
472 : "cc", "memory");
473
474 return t;
475 }
476
477 /**
478 * atomic_sub_and_test - subtract value from variable and test result
479 * @i: integer value to subtract
480 * @v: pointer of type atomic_t
481 *
482 * Atomically subtracts @i from @v and returns
483 * true if the result is zero, or false for all
484 * other cases.
485 */
486 static __inline__ int atomic_sub_and_test(int a, atomic_t *v)
487 {
488 return atomic_sub_return(a, v) == 0;
489 }
490
491 /**
492 * atomic_inc - increment atomic variable
493 * @v: pointer of type atomic_t
494 *
495 * Atomically increments @v by 1.
496 */
497 static __inline__ void atomic_inc(atomic_t *v)
498 {
499 atomic_add(1, v);
500 }
501
502 /**
503 * atomic_dec - decrement atomic variable
504 * @v: pointer of type atomic_t
505 *
506 * Atomically decrements @v by 1.
507 */
508 static __inline__ void atomic_dec(atomic_t *v)
509 {
510 atomic_sub(1, v);
511 }
512
513 /**
514 * atomic_dec_and_test - decrement and test
515 * @v: pointer of type atomic_t
516 *
517 * Atomically decrements @v by 1 and
518 * returns true if the result is 0, or false for all other
519 * cases.
520 */
521 static __inline__ int atomic_dec_and_test(atomic_t *v)
522 {
523 return atomic_sub_and_test(1, v);
524 }
525
526 /**
527 * atomic_inc_and_test - increment and test
528 * @v: pointer of type atomic_t
529 *
530 * Atomically increments @v by 1
531 * and returns true if the result is zero, or false for all
532 * other cases.
533 */
534 static __inline__ int atomic_inc_and_test(atomic_t *v)
535 {
536 	return atomic_inc_return(v) == 0;
537 }
538
539 /**
540 * atomic_add_return - add and return
541 * @v: pointer of type atomic_t
542 * @i: integer value to add
543 *
544 * Atomically adds @i to @v and returns @i + @v
545 */
546 static __inline__ int atomic_add_return(int a, atomic_t *v)
547 {
548 int t;
549
550 __asm__ __volatile__(
551 "lwsync \n\
552 1: lwarx %0,0,%2 # atomic_add_return \n\
553 add %0,%1,%0 \n\
554 stwcx. %0,0,%2 \n\
555 bne- 1b \n\
556 isync"
557 : "=&r" (t)
558 : "r" (a), "r" (&v->counter)
559 : "cc", "memory");
560
561 return t;
562 }
563
564 /**
565 * atomic_add_negative - add and test if negative
566 * @v: pointer of type atomic_t
567 * @i: integer value to add
568 *
569 * Atomically adds @i to @v and returns true
570 * if the result is negative, or false when
571 * result is greater than or equal to zero.
572 */
573 static __inline__ int atomic_add_negative(int a, atomic_t *v)
574 {
575 return atomic_add_return(a, v) < 0;
576 }
577
578 /**
579 * atomic_add_unless - add unless the number is a given value
580 * @v: pointer of type atomic_t
581 * @a: the amount to add to v...
582 * @u: ...unless v is equal to u.
583 *
584 * Atomically adds @a to @v, so long as it was not @u.
585 * Returns non-zero if @v was not @u, and zero otherwise.
586 */
587 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
588 {
589 int t;
590
591 __asm__ __volatile__(
592 "lwsync \n\
593 1: lwarx %0,0,%1 # atomic_add_unless\n\
594 	 cmpw	0,%0,%3 \n\
595 beq- 2f \n\
596 add %0,%2,%0 \n\
597 stwcx. %0,0,%1 \n\
598 bne- 1b \n\
599 isync \n\
600 subf %0,%2,%0 \n\
601 2:"
602 : "=&r" (t)
603 : "r" (&v->counter), "r" (a), "r" (u)
604 : "cc", "memory");
605
606 return t != u;
607 }
608
609 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
610
611 #define atomic_inc_return(v) (atomic_add_return(1,v))
612 #define atomic_dec_return(v) (atomic_sub_return(1,v))
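/*
 * Illustrative sketch (not part of the generated header): the classic
 * lookup/release pair for a reference-counted object, built on
 * atomic_inc_not_zero() and atomic_dec_and_test().  The struct and
 * function names are invented for this example only.
 *
 *	struct obj {
 *		atomic_t refcount;	// set to ATOMIC_INIT(1) at creation
 *		// ... payload ...
 *	};
 *
 *	static int obj_get(struct obj *p)
 *	{
 *		return atomic_inc_not_zero(&p->refcount);	// 0: already dead
 *	}
 *
 *	static void obj_put(struct obj *p)
 *	{
 *		if (atomic_dec_and_test(&p->refcount))
 *			free(p);
 *	}
 */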
613
614 /* Unlike x86, PowerPC atomic operations are not full memory barriers, so use smp_mb(). */
615 #define smp_mb__before_atomic_dec() smp_mb()
616 #define smp_mb__after_atomic_dec() smp_mb()
617 #define smp_mb__before_atomic_inc() smp_mb()
618 #define smp_mb__after_atomic_inc() smp_mb()
619
620 /*
621 * api_pthreads.h: API mapping to pthreads environment.
622 *
623 * This program is free software; you can redistribute it and/or modify
624 * it under the terms of the GNU General Public License as published by
625 * the Free Software Foundation; either version 2 of the License, or
626 * (at your option) any later version. However, please note that much
627 * of the code in this file derives from the Linux kernel, and that such
628 * code may not be available except under GPLv2.
629 *
630 * This program is distributed in the hope that it will be useful,
631 * but WITHOUT ANY WARRANTY; without even the implied warranty of
632 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
633 * GNU General Public License for more details.
634 *
635 * You should have received a copy of the GNU General Public License
636 * along with this program; if not, write to the Free Software
637 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
638 *
639 * Copyright (c) 2006 Paul E. McKenney, IBM.
640 */
641
642 #include <stdio.h>
643 #include <stdlib.h>
644 #include <errno.h>
645 #include <limits.h>
646 #include <sys/types.h>
#include <stddef.h>	/* offsetof(), needed by container_of() */
#include <sys/time.h>	/* gettimeofday(), needed by get_microseconds() */
647 #define __USE_GNU
648 #include <pthread.h>
649 #include <sched.h>
650 #include <sys/param.h>
651 /* #include "atomic.h" */
652
653 /*
654 * Compiler magic.
655 */
656 #define container_of(ptr, type, member) ({ \
657 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
658 (type *)( (char *)__mptr - offsetof(type,member) );})
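/*
 * Illustrative sketch (not part of the generated header): container_of()
 * recovers the enclosing structure from a pointer to one of its members.
 * The struct name 'wrapper' and the helper are invented for this example
 * only.
 *
 *	struct wrapper {
 *		int key;
 *		int count;
 *	};
 *
 *	static struct wrapper *wrapper_from_count(int *cp)
 *	{
 *		return container_of(cp, struct wrapper, count);
 *	}
 */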
659
660 /*
661 * Default machine parameters.
662 */
663
664 #ifndef CACHE_LINE_SIZE
665 #define CACHE_LINE_SIZE 128
666 #endif /* #ifndef CACHE_LINE_SIZE */
667
668 /*
669 * Exclusive locking primitives.
670 */
671
672 typedef pthread_mutex_t spinlock_t;
673
674 #define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
675 #define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
676
677 static void spin_lock_init(spinlock_t *sp)
678 {
679 if (pthread_mutex_init(sp, NULL) != 0) {
680 perror("spin_lock_init:pthread_mutex_init");
681 exit(-1);
682 }
683 }
684
685 static void spin_lock(spinlock_t *sp)
686 {
687 if (pthread_mutex_lock(sp) != 0) {
688 perror("spin_lock:pthread_mutex_lock");
689 exit(-1);
690 }
691 }
692
693 static void spin_unlock(spinlock_t *sp)
694 {
695 if (pthread_mutex_unlock(sp) != 0) {
696 perror("spin_unlock:pthread_mutex_unlock");
697 exit(-1);
698 }
699 }
700
701 #define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
702 #define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
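/*
 * Illustrative sketch (not part of the generated header): protecting a
 * shared counter with the pthread-backed spinlock wrappers above.  The
 * names 'count_lock', 'count' and bump_count() are invented for this
 * example only.
 *
 *	DEFINE_SPINLOCK(count_lock);
 *	static unsigned long count;
 *
 *	static void bump_count(void)
 *	{
 *		spin_lock(&count_lock);
 *		count++;
 *		spin_unlock(&count_lock);
 *	}
 */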
703
704 /*
705 * Thread creation/destruction primitives.
706 */
707
708 typedef pthread_t thread_id_t;
709
710 #define NR_THREADS 128
711
712 #define __THREAD_ID_MAP_EMPTY 0
713 #define __THREAD_ID_MAP_WAITING 1
714 thread_id_t __thread_id_map[NR_THREADS];
715 spinlock_t __thread_id_map_mutex;
716
717 #define for_each_thread(t) \
718 for (t = 0; t < NR_THREADS; t++)
719
720 #define for_each_running_thread(t) \
721 for (t = 0; t < NR_THREADS; t++) \
722 if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
723 (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))
724
725 #define for_each_tid(t, tid) \
726 for (t = 0; t < NR_THREADS; t++) \
727 if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
728 ((tid) != __THREAD_ID_MAP_WAITING))
729
730 pthread_key_t thread_id_key;
731
732 static int __smp_thread_id(void)
733 {
734 int i;
735 thread_id_t tid = pthread_self();
736
737 for (i = 0; i < NR_THREADS; i++) {
738 if (__thread_id_map[i] == tid) {
739 long v = i + 1; /* must be non-NULL. */
740
741 if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
742 perror("pthread_setspecific");
743 exit(-1);
744 }
745 return i;
746 }
747 }
748 spin_lock(&__thread_id_map_mutex);
749 for (i = 0; i < NR_THREADS; i++) {
750 		if (__thread_id_map[i] == tid) {
751 			spin_unlock(&__thread_id_map_mutex);
752 			return i;
		}
753 }
754 spin_unlock(&__thread_id_map_mutex);
755 fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
756 (int)tid, (int)tid);
757 exit(-1);
758 }
759
760 static int smp_thread_id(void)
761 {
762 void *id;
763
764 id = pthread_getspecific(thread_id_key);
765 if (id == NULL)
766 return __smp_thread_id();
767 	return (long)id - 1;	/* avoid arithmetic on a void pointer */
768 }
769
770 static thread_id_t create_thread(void *(*func)(void *), void *arg)
771 {
772 thread_id_t tid;
773 int i;
774
775 spin_lock(&__thread_id_map_mutex);
776 for (i = 0; i < NR_THREADS; i++) {
777 if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
778 break;
779 }
780 if (i >= NR_THREADS) {
781 spin_unlock(&__thread_id_map_mutex);
782 fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
783 exit(-1);
784 }
785 __thread_id_map[i] = __THREAD_ID_MAP_WAITING;
786 spin_unlock(&__thread_id_map_mutex);
787 if (pthread_create(&tid, NULL, func, arg) != 0) {
788 perror("create_thread:pthread_create");
789 exit(-1);
790 }
791 __thread_id_map[i] = tid;
792 return tid;
793 }
794
795 static void *wait_thread(thread_id_t tid)
796 {
797 int i;
798 void *vp;
799
800 for (i = 0; i < NR_THREADS; i++) {
801 if (__thread_id_map[i] == tid)
802 break;
803 }
804 if (i >= NR_THREADS){
805 fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
806 (int)tid, (int)tid);
807 exit(-1);
808 }
809 if (pthread_join(tid, &vp) != 0) {
810 perror("wait_thread:pthread_join");
811 exit(-1);
812 }
813 __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
814 return vp;
815 }
816
817 static void wait_all_threads(void)
818 {
819 int i;
820 thread_id_t tid;
821
822 for (i = 1; i < NR_THREADS; i++) {
823 tid = __thread_id_map[i];
824 if (tid != __THREAD_ID_MAP_EMPTY &&
825 tid != __THREAD_ID_MAP_WAITING)
826 (void)wait_thread(tid);
827 }
828 }
829
830 static void run_on(int cpu)
831 {
832 cpu_set_t mask;
833
834 CPU_ZERO(&mask);
835 CPU_SET(cpu, &mask);
836 sched_setaffinity(0, sizeof(mask), &mask);
837 }
838
839 /*
840 * timekeeping -- very crude -- should use MONOTONIC...
841 */
842
843 long long get_microseconds(void)
844 {
845 struct timeval tv;
846
847 if (gettimeofday(&tv, NULL) != 0)
848 abort();
849 return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
850 }
851
852 /*
853 * Per-thread variables.
854 */
855
856 #define DEFINE_PER_THREAD(type, name) \
857 struct { \
858 __typeof__(type) v \
859 __attribute__((__aligned__(CACHE_LINE_SIZE))); \
860 } __per_thread_##name[NR_THREADS];
861 #define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
862
863 #define per_thread(name, thread) __per_thread_##name[thread].v
864 #define __get_thread_var(name) per_thread(name, smp_thread_id())
865
866 #define init_per_thread(name, v) \
867 do { \
868 int __i_p_t_i; \
869 for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
870 per_thread(name, __i_p_t_i) = v; \
871 } while (0)
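/*
 * Illustrative sketch (not part of the generated header): a per-thread
 * statistics counter kept free of false sharing by the cache-line
 * alignment above.  The name 'nr_ops' and both helpers are invented for
 * this example only.
 *
 *	DEFINE_PER_THREAD(long, nr_ops);	// init_per_thread(nr_ops, 0) at startup
 *
 *	static void count_op(void)
 *	{
 *		__get_thread_var(nr_ops)++;	// touches this thread's slot only
 *	}
 *
 *	static long sum_ops(void)
 *	{
 *		long sum = 0;
 *		int t;
 *
 *		for_each_thread(t)
 *			sum += per_thread(nr_ops, t);
 *		return sum;
 *	}
 */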
872
873 /*
874 * CPU traversal primitives.
875 */
876
877 #ifndef NR_CPUS
878 #define NR_CPUS 16
879 #endif /* #ifndef NR_CPUS */
880
881 #define for_each_possible_cpu(cpu) \
882 for (cpu = 0; cpu < NR_CPUS; cpu++)
883 #define for_each_online_cpu(cpu) \
884 for (cpu = 0; cpu < NR_CPUS; cpu++)
885
886 /*
887 * Per-CPU variables.
888 */
889
890 #define DEFINE_PER_CPU(type, name) \
891 struct { \
892 __typeof__(type) v \
893 __attribute__((__aligned__(CACHE_LINE_SIZE))); \
894 } __per_cpu_##name[NR_CPUS]
895 #define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)
896
897 DEFINE_PER_THREAD(int, smp_processor_id);
898
899 #define per_cpu(name, thread) __per_cpu_##name[thread].v
900 #define __get_cpu_var(name) per_cpu(name, smp_processor_id())
901
902 #define init_per_cpu(name, v) \
903 do { \
904 int __i_p_c_i; \
905 for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
906 per_cpu(name, __i_p_c_i) = v; \
907 } while (0)
908
909 /*
910 * CPU state checking (crowbarred).
911 */
912
913 #define idle_cpu(cpu) 0
914 #define in_softirq() 1
915 #define hardirq_count() 0
916 #define PREEMPT_SHIFT 0
917 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
918 #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
919 #define PREEMPT_BITS 8
920 #define SOFTIRQ_BITS 8
921
922 /*
923 * CPU hotplug.
924 */
925
926 struct notifier_block {
927 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
928 struct notifier_block *next;
929 int priority;
930 };
931
932 #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
933 #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
934 #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
935 #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
936 #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
937 #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
938 #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
939 * not handling interrupts, soon dead */
940 #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
941 * lock is dropped */
942
943 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
944 * operation in progress
945 */
946 #define CPU_TASKS_FROZEN 0x0010
947
948 #define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
949 #define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
950 #define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
951 #define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
952 #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
953 #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
954 #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
955
956 /* Hibernation and suspend events */
957 #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
958 #define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */
959 #define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */
960 #define PM_POST_SUSPEND 0x0004 /* Suspend finished */
961 #define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
962 #define PM_POST_RESTORE 0x0006 /* Restore failed */
963
964 #define NOTIFY_DONE 0x0000 /* Don't care */
965 #define NOTIFY_OK 0x0001 /* Suits me */
966 #define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
967 #define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
968 /* Bad/Veto action */
969 /*
970 * Clean way to return from the notifier and stop further calls.
971 */
972 #define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
973
974 /*
975 * Bug checks.
976 */
977
978 #define BUG_ON(c) do { if (!(c)) abort(); } while (0)
979
980 /*
981 * Initialization -- Must be called before calling any primitives.
982 */
983
984 static void smp_init(void)
985 {
986 int i;
987
988 spin_lock_init(&__thread_id_map_mutex);
989 __thread_id_map[0] = pthread_self();
990 for (i = 1; i < NR_THREADS; i++)
991 __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
992 init_per_thread(smp_processor_id, 0);
993 if (pthread_key_create(&thread_id_key, NULL) != 0) {
994 perror("pthread_key_create");
995 exit(-1);
996 }
997 }
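/*
 * Illustrative sketch (not part of the generated header): the expected
 * calling sequence for the thread primitives above.  The worker function
 * and the thread count are invented for this example only.
 *
 *	static void *worker(void *arg)
 *	{
 *		run_on(smp_thread_id() % NR_CPUS);	// optional CPU affinity
 *		// ... per-thread work ...
 *		return NULL;
 *	}
 *
 *	int main(int argc, char *argv[])
 *	{
 *		int i;
 *
 *		smp_init();			// must come before create_thread()
 *		for (i = 0; i < 4; i++)
 *			create_thread(worker, NULL);
 *		wait_all_threads();
 *		return 0;
 *	}
 */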
998
999 /* Taken from the Linux kernel source tree, so GPLv2-only!!! */
1000
1001 #ifndef _LINUX_LIST_H
1002 #define _LINUX_LIST_H
1003
1004 #define LIST_POISON1 ((void *) 0x00100100)
1005 #define LIST_POISON2 ((void *) 0x00200200)
1006
1007 #define container_of(ptr, type, member) ({ \
1008 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
1009 (type *)( (char *)__mptr - offsetof(type,member) );})
1010
1011 /*
1012 * Simple doubly linked list implementation.
1013 *
1014 * Some of the internal functions ("__xxx") are useful when
1015 * manipulating whole lists rather than single entries, as
1016 * sometimes we already know the next/prev entries and we can
1017 * generate better code by using them directly rather than
1018 * using the generic single-entry routines.
1019 */
1020
1021 struct list_head {
1022 struct list_head *next, *prev;
1023 };
1024
1025 #define LIST_HEAD_INIT(name) { &(name), &(name) }
1026
1027 #define LIST_HEAD(name) \
1028 struct list_head name = LIST_HEAD_INIT(name)
1029
1030 static inline void INIT_LIST_HEAD(struct list_head *list)
1031 {
1032 list->next = list;
1033 list->prev = list;
1034 }
1035
1036 /*
1037 * Insert a new entry between two known consecutive entries.
1038 *
1039 * This is only for internal list manipulation where we know
1040 * the prev/next entries already!
1041 */
1042 #ifndef CONFIG_DEBUG_LIST
1043 static inline void __list_add(struct list_head *new,
1044 struct list_head *prev,
1045 struct list_head *next)
1046 {
1047 next->prev = new;
1048 new->next = next;
1049 new->prev = prev;
1050 prev->next = new;
1051 }
1052 #else
1053 extern void __list_add(struct list_head *new,
1054 struct list_head *prev,
1055 struct list_head *next);
1056 #endif
1057
1058 /**
1059 * list_add - add a new entry
1060 * @new: new entry to be added
1061 * @head: list head to add it after
1062 *
1063 * Insert a new entry after the specified head.
1064 * This is good for implementing stacks.
1065 */
1066 static inline void list_add(struct list_head *new, struct list_head *head)
1067 {
1068 __list_add(new, head, head->next);
1069 }
1070
1071
1072 /**
1073 * list_add_tail - add a new entry
1074 * @new: new entry to be added
1075 * @head: list head to add it before
1076 *
1077 * Insert a new entry before the specified head.
1078 * This is useful for implementing queues.
1079 */
1080 static inline void list_add_tail(struct list_head *new, struct list_head *head)
1081 {
1082 __list_add(new, head->prev, head);
1083 }
1084
1085 /*
1086 * Delete a list entry by making the prev/next entries
1087 * point to each other.
1088 *
1089 * This is only for internal list manipulation where we know
1090 * the prev/next entries already!
1091 */
1092 static inline void __list_del(struct list_head * prev, struct list_head * next)
1093 {
1094 next->prev = prev;
1095 prev->next = next;
1096 }
1097
1098 /**
1099 * list_del - deletes entry from list.
1100 * @entry: the element to delete from the list.
1101 * Note: list_empty() on entry does not return true after this, the entry is
1102 * in an undefined state.
1103 */
1104 #ifndef CONFIG_DEBUG_LIST
1105 static inline void list_del(struct list_head *entry)
1106 {
1107 __list_del(entry->prev, entry->next);
1108 entry->next = LIST_POISON1;
1109 entry->prev = LIST_POISON2;
1110 }
1111 #else
1112 extern void list_del(struct list_head *entry);
1113 #endif
1114
1115 /**
1116 * list_replace - replace old entry by new one
1117 * @old : the element to be replaced
1118 * @new : the new element to insert
1119 *
1120 * If @old was empty, it will be overwritten.
1121 */
1122 static inline void list_replace(struct list_head *old,
1123 struct list_head *new)
1124 {
1125 new->next = old->next;
1126 new->next->prev = new;
1127 new->prev = old->prev;
1128 new->prev->next = new;
1129 }
1130
1131 static inline void list_replace_init(struct list_head *old,
1132 struct list_head *new)
1133 {
1134 list_replace(old, new);
1135 INIT_LIST_HEAD(old);
1136 }
1137
1138 /**
1139 * list_del_init - deletes entry from list and reinitialize it.
1140 * @entry: the element to delete from the list.
1141 */
1142 static inline void list_del_init(struct list_head *entry)
1143 {
1144 __list_del(entry->prev, entry->next);
1145 INIT_LIST_HEAD(entry);
1146 }
1147
1148 /**
1149 * list_move - delete from one list and add as another's head
1150 * @list: the entry to move
1151 * @head: the head that will precede our entry
1152 */
1153 static inline void list_move(struct list_head *list, struct list_head *head)
1154 {
1155 __list_del(list->prev, list->next);
1156 list_add(list, head);
1157 }
1158
1159 /**
1160 * list_move_tail - delete from one list and add as another's tail
1161 * @list: the entry to move
1162 * @head: the head that will follow our entry
1163 */
1164 static inline void list_move_tail(struct list_head *list,
1165 struct list_head *head)
1166 {
1167 __list_del(list->prev, list->next);
1168 list_add_tail(list, head);
1169 }
1170
1171 /**
1172 * list_is_last - tests whether @list is the last entry in list @head
1173 * @list: the entry to test
1174 * @head: the head of the list
1175 */
1176 static inline int list_is_last(const struct list_head *list,
1177 const struct list_head *head)
1178 {
1179 return list->next == head;
1180 }
1181
1182 /**
1183 * list_empty - tests whether a list is empty
1184 * @head: the list to test.
1185 */
1186 static inline int list_empty(const struct list_head *head)
1187 {
1188 return head->next == head;
1189 }
1190
1191 /**
1192 * list_empty_careful - tests whether a list is empty and not being modified
1193 * @head: the list to test
1194 *
1195 * Description:
1196 * tests whether a list is empty _and_ checks that no other CPU might be
1197 * in the process of modifying either member (next or prev)
1198 *
1199 * NOTE: using list_empty_careful() without synchronization
1200 * can only be safe if the only activity that can happen
1201 * to the list entry is list_del_init(). Eg. it cannot be used
1202 * if another CPU could re-list_add() it.
1203 */
1204 static inline int list_empty_careful(const struct list_head *head)
1205 {
1206 struct list_head *next = head->next;
1207 return (next == head) && (next == head->prev);
1208 }
1209
1210 /**
1211 * list_is_singular - tests whether a list has just one entry.
1212 * @head: the list to test.
1213 */
1214 static inline int list_is_singular(const struct list_head *head)
1215 {
1216 return !list_empty(head) && (head->next == head->prev);
1217 }
1218
1219 static inline void __list_cut_position(struct list_head *list,
1220 struct list_head *head, struct list_head *entry)
1221 {
1222 struct list_head *new_first = entry->next;
1223 list->next = head->next;
1224 list->next->prev = list;
1225 list->prev = entry;
1226 entry->next = list;
1227 head->next = new_first;
1228 new_first->prev = head;
1229 }
1230
1231 /**
1232 * list_cut_position - cut a list into two
1233 * @list: a new list to add all removed entries
1234 * @head: a list with entries
1235 * @entry: an entry within head, could be the head itself
1236 * and if so we won't cut the list
1237 *
1238 * This helper moves the initial part of @head, up to and
1239 * including @entry, from @head to @list. You should
1240 * pass on @entry an element you know is on @head. @list
1241 * should be an empty list or a list you do not care about
1242 * losing its data.
1243 *
1244 */
1245 static inline void list_cut_position(struct list_head *list,
1246 struct list_head *head, struct list_head *entry)
1247 {
1248 if (list_empty(head))
1249 return;
1250 if (list_is_singular(head) &&
1251 (head->next != entry && head != entry))
1252 return;
1253 if (entry == head)
1254 INIT_LIST_HEAD(list);
1255 else
1256 __list_cut_position(list, head, entry);
1257 }
1258
1259 static inline void __list_splice(const struct list_head *list,
1260 struct list_head *prev,
1261 struct list_head *next)
1262 {
1263 struct list_head *first = list->next;
1264 struct list_head *last = list->prev;
1265
1266 first->prev = prev;
1267 prev->next = first;
1268
1269 last->next = next;
1270 next->prev = last;
1271 }
1272
1273 /**
1274 * list_splice - join two lists, this is designed for stacks
1275 * @list: the new list to add.
1276 * @head: the place to add it in the first list.
1277 */
1278 static inline void list_splice(const struct list_head *list,
1279 struct list_head *head)
1280 {
1281 if (!list_empty(list))
1282 __list_splice(list, head, head->next);
1283 }
1284
1285 /**
1286 * list_splice_tail - join two lists, each list being a queue
1287 * @list: the new list to add.
1288 * @head: the place to add it in the first list.
1289 */
1290 static inline void list_splice_tail(struct list_head *list,
1291 struct list_head *head)
1292 {
1293 if (!list_empty(list))
1294 __list_splice(list, head->prev, head);
1295 }
1296
1297 /**
1298 * list_splice_init - join two lists and reinitialise the emptied list.
1299 * @list: the new list to add.
1300 * @head: the place to add it in the first list.
1301 *
1302 * The list at @list is reinitialised
1303 */
1304 static inline void list_splice_init(struct list_head *list,
1305 struct list_head *head)
1306 {
1307 if (!list_empty(list)) {
1308 __list_splice(list, head, head->next);
1309 INIT_LIST_HEAD(list);
1310 }
1311 }
1312
1313 /**
1314 * list_splice_tail_init - join two lists and reinitialise the emptied list
1315 * @list: the new list to add.
1316 * @head: the place to add it in the first list.
1317 *
1318 * Each of the lists is a queue.
1319 * The list at @list is reinitialised
1320 */
1321 static inline void list_splice_tail_init(struct list_head *list,
1322 struct list_head *head)
1323 {
1324 if (!list_empty(list)) {
1325 __list_splice(list, head->prev, head);
1326 INIT_LIST_HEAD(list);
1327 }
1328 }
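/*
 * Illustrative sketch (not part of the generated header): the common
 * "drain a shared queue" pattern built on list_splice_init(), which
 * empties the source list so it can be refilled immediately.  The names
 * 'pending', 'pending_lock' and drain_pending() are invented for this
 * example only.
 *
 *	static LIST_HEAD(pending);
 *	DEFINE_SPINLOCK(pending_lock);
 *
 *	static void drain_pending(void)
 *	{
 *		LIST_HEAD(local);
 *
 *		spin_lock(&pending_lock);
 *		list_splice_init(&pending, &local);	// 'pending' is now empty
 *		spin_unlock(&pending_lock);
 *
 *		// process 'local' without holding the lock ...
 *	}
 */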
1329
1330 /**
1331 * list_entry - get the struct for this entry
1332 * @ptr: the &struct list_head pointer.
1333 * @type: the type of the struct this is embedded in.
1334 * @member: the name of the list_struct within the struct.
1335 */
1336 #define list_entry(ptr, type, member) \
1337 container_of(ptr, type, member)
1338
1339 /**
1340 * list_first_entry - get the first element from a list
1341 * @ptr: the list head to take the element from.
1342 * @type: the type of the struct this is embedded in.
1343 * @member: the name of the list_struct within the struct.
1344 *
1345 * Note, that list is expected to be not empty.
1346 */
1347 #define list_first_entry(ptr, type, member) \
1348 list_entry((ptr)->next, type, member)
1349
1350 /**
1351 * list_for_each - iterate over a list
1352 * @pos: the &struct list_head to use as a loop cursor.
1353 * @head: the head for your list.
1354 */
1355 #define list_for_each(pos, head) \
1356 for (pos = (head)->next; prefetch(pos->next), pos != (head); \
1357 pos = pos->next)
1358
1359 /**
1360 * __list_for_each - iterate over a list
1361 * @pos: the &struct list_head to use as a loop cursor.
1362 * @head: the head for your list.
1363 *
1364 * This variant differs from list_for_each() in that it's the
1365 * simplest possible list iteration code, no prefetching is done.
1366 * Use this for code that knows the list to be very short (empty
1367 * or 1 entry) most of the time.
1368 */
1369 #define __list_for_each(pos, head) \
1370 for (pos = (head)->next; pos != (head); pos = pos->next)
1371
1372 /**
1373 * list_for_each_prev - iterate over a list backwards
1374 * @pos: the &struct list_head to use as a loop cursor.
1375 * @head: the head for your list.
1376 */
1377 #define list_for_each_prev(pos, head) \
1378 for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
1379 pos = pos->prev)
1380
1381 /**
1382 * list_for_each_safe - iterate over a list safe against removal of list entry
1383 * @pos: the &struct list_head to use as a loop cursor.
1384 * @n: another &struct list_head to use as temporary storage
1385 * @head: the head for your list.
1386 */
1387 #define list_for_each_safe(pos, n, head) \
1388 for (pos = (head)->next, n = pos->next; pos != (head); \
1389 pos = n, n = pos->next)
1390
1391 /**
1392 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
1393 * @pos: the &struct list_head to use as a loop cursor.
1394 * @n: another &struct list_head to use as temporary storage
1395 * @head: the head for your list.
1396 */
1397 #define list_for_each_prev_safe(pos, n, head) \
1398 for (pos = (head)->prev, n = pos->prev; \
1399 prefetch(pos->prev), pos != (head); \
1400 pos = n, n = pos->prev)
1401
1402 /**
1403 * list_for_each_entry - iterate over list of given type
1404 * @pos: the type * to use as a loop cursor.
1405 * @head: the head for your list.
1406 * @member: the name of the list_struct within the struct.
1407 */
1408 #define list_for_each_entry(pos, head, member) \
1409 for (pos = list_entry((head)->next, typeof(*pos), member); \
1410 prefetch(pos->member.next), &pos->member != (head); \
1411 pos = list_entry(pos->member.next, typeof(*pos), member))
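/*
 * Illustrative sketch (not part of the generated header): embedding a
 * list_head in a structure, queueing entries and walking them with
 * list_for_each_entry().  The struct 'item' and the helpers are invented
 * for this example only.
 *
 *	struct item {
 *		int value;
 *		struct list_head node;
 *	};
 *
 *	static LIST_HEAD(items);
 *
 *	static void add_item(struct item *p)
 *	{
 *		list_add_tail(&p->node, &items);	// FIFO order
 *	}
 *
 *	static long sum_items(void)
 *	{
 *		struct item *p;
 *		long sum = 0;
 *
 *		list_for_each_entry(p, &items, node)
 *			sum += p->value;
 *		return sum;
 *	}
 */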
1412
1413 /**
1414 * list_for_each_entry_reverse - iterate backwards over list of given type.
1415 * @pos: the type * to use as a loop cursor.
1416 * @head: the head for your list.
1417 * @member: the name of the list_struct within the struct.
1418 */
1419 #define list_for_each_entry_reverse(pos, head, member) \
1420 for (pos = list_entry((head)->prev, typeof(*pos), member); \
1421 prefetch(pos->member.prev), &pos->member != (head); \
1422 pos = list_entry(pos->member.prev, typeof(*pos), member))
1423
1424 /**
1425 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
1426 * @pos: the type * to use as a start point
1427 * @head: the head of the list
1428 * @member: the name of the list_struct within the struct.
1429 *
1430 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
1431 */
1432 #define list_prepare_entry(pos, head, member) \
1433 ((pos) ? : list_entry(head, typeof(*pos), member))
1434
1435 /**
1436 * list_for_each_entry_continue - continue iteration over list of given type
1437 * @pos: the type * to use as a loop cursor.
1438 * @head: the head for your list.
1439 * @member: the name of the list_struct within the struct.
1440 *
1441 * Continue to iterate over list of given type, continuing after
1442 * the current position.
1443 */
1444 #define list_for_each_entry_continue(pos, head, member) \
1445 for (pos = list_entry(pos->member.next, typeof(*pos), member); \
1446 prefetch(pos->member.next), &pos->member != (head); \
1447 pos = list_entry(pos->member.next, typeof(*pos), member))
1448
1449 /**
1450 * list_for_each_entry_continue_reverse - iterate backwards from the given point
1451 * @pos: the type * to use as a loop cursor.
1452 * @head: the head for your list.
1453 * @member: the name of the list_struct within the struct.
1454 *
1455 * Start to iterate over list of given type backwards, continuing after
1456 * the current position.
1457 */
1458 #define list_for_each_entry_continue_reverse(pos, head, member) \
1459 for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
1460 prefetch(pos->member.prev), &pos->member != (head); \
1461 pos = list_entry(pos->member.prev, typeof(*pos), member))
1462
1463 /**
1464 * list_for_each_entry_from - iterate over list of given type from the current point
1465 * @pos: the type * to use as a loop cursor.
1466 * @head: the head for your list.
1467 * @member: the name of the list_struct within the struct.
1468 *
1469 * Iterate over list of given type, continuing from current position.
1470 */
1471 #define list_for_each_entry_from(pos, head, member) \
1472 for (; prefetch(pos->member.next), &pos->member != (head); \
1473 pos = list_entry(pos->member.next, typeof(*pos), member))
1474
1475 /**
1476 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
1477 * @pos: the type * to use as a loop cursor.
1478 * @n: another type * to use as temporary storage
1479 * @head: the head for your list.
1480 * @member: the name of the list_struct within the struct.
1481 */
1482 #define list_for_each_entry_safe(pos, n, head, member) \
1483 for (pos = list_entry((head)->next, typeof(*pos), member), \
1484 n = list_entry(pos->member.next, typeof(*pos), member); \
1485 &pos->member != (head); \
1486 pos = n, n = list_entry(n->member.next, typeof(*n), member))
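/*
 * Illustrative sketch (not part of the generated header): deleting
 * entries while iterating requires the _safe variant, so the cursor is
 * never read after its entry has been freed.  'struct item' is the
 * hypothetical type from the sketch above.
 *
 *	static void free_items(struct list_head *head)
 *	{
 *		struct item *p, *next;
 *
 *		list_for_each_entry_safe(p, next, head, node) {
 *			list_del(&p->node);
 *			free(p);
 *		}
 *	}
 */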
1487
1488 /**
1489 * list_for_each_entry_safe_continue
1490 * @pos: the type * to use as a loop cursor.
1491 * @n: another type * to use as temporary storage
1492 * @head: the head for your list.
1493 * @member: the name of the list_struct within the struct.
1494 *
1495 * Iterate over list of given type, continuing after current point,
1496 * safe against removal of list entry.
1497 */
1498 #define list_for_each_entry_safe_continue(pos, n, head, member) \
1499 for (pos = list_entry(pos->member.next, typeof(*pos), member), \
1500 n = list_entry(pos->member.next, typeof(*pos), member); \
1501 &pos->member != (head); \
1502 pos = n, n = list_entry(n->member.next, typeof(*n), member))
1503
1504 /**
1505 * list_for_each_entry_safe_from
1506 * @pos: the type * to use as a loop cursor.
1507 * @n: another type * to use as temporary storage
1508 * @head: the head for your list.
1509 * @member: the name of the list_struct within the struct.
1510 *
1511 * Iterate over list of given type from current point, safe against
1512 * removal of list entry.
1513 */
1514 #define list_for_each_entry_safe_from(pos, n, head, member) \
1515 for (n = list_entry(pos->member.next, typeof(*pos), member); \
1516 &pos->member != (head); \
1517 pos = n, n = list_entry(n->member.next, typeof(*n), member))
1518
1519 /**
1520 * list_for_each_entry_safe_reverse
1521 * @pos: the type * to use as a loop cursor.
1522 * @n: another type * to use as temporary storage
1523 * @head: the head for your list.
1524 * @member: the name of the list_struct within the struct.
1525 *
1526 * Iterate backwards over list of given type, safe against removal
1527 * of list entry.
1528 */
1529 #define list_for_each_entry_safe_reverse(pos, n, head, member) \
1530 for (pos = list_entry((head)->prev, typeof(*pos), member), \
1531 n = list_entry(pos->member.prev, typeof(*pos), member); \
1532 &pos->member != (head); \
1533 pos = n, n = list_entry(n->member.prev, typeof(*n), member))
1534
1535 /*
1536 * Double linked lists with a single pointer list head.
1537 * Mostly useful for hash tables where the two pointer list head is
1538 * too wasteful.
1539 * You lose the ability to access the tail in O(1).
1540 */
1541
1542 struct hlist_head {
1543 struct hlist_node *first;
1544 };
1545
1546 struct hlist_node {
1547 struct hlist_node *next, **pprev;
1548 };
1549
1550 #define HLIST_HEAD_INIT { .first = NULL }
1551 #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
1552 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
1553 static inline void INIT_HLIST_NODE(struct hlist_node *h)
1554 {
1555 h->next = NULL;
1556 h->pprev = NULL;
1557 }
1558
1559 static inline int hlist_unhashed(const struct hlist_node *h)
1560 {
1561 return !h->pprev;
1562 }
1563
1564 static inline int hlist_empty(const struct hlist_head *h)
1565 {
1566 return !h->first;
1567 }
1568
1569 static inline void __hlist_del(struct hlist_node *n)
1570 {
1571 struct hlist_node *next = n->next;
1572 struct hlist_node **pprev = n->pprev;
1573 *pprev = next;
1574 if (next)
1575 next->pprev = pprev;
1576 }
1577
1578 static inline void hlist_del(struct hlist_node *n)
1579 {
1580 __hlist_del(n);
1581 n->next = LIST_POISON1;
1582 n->pprev = LIST_POISON2;
1583 }
1584
1585 static inline void hlist_del_init(struct hlist_node *n)
1586 {
1587 if (!hlist_unhashed(n)) {
1588 __hlist_del(n);
1589 INIT_HLIST_NODE(n);
1590 }
1591 }
1592
1593 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
1594 {
1595 struct hlist_node *first = h->first;
1596 n->next = first;
1597 if (first)
1598 first->pprev = &n->next;
1599 h->first = n;
1600 n->pprev = &h->first;
1601 }
1602
1603 /* next must be != NULL */
1604 static inline void hlist_add_before(struct hlist_node *n,
1605 struct hlist_node *next)
1606 {
1607 n->pprev = next->pprev;
1608 n->next = next;
1609 next->pprev = &n->next;
1610 *(n->pprev) = n;
1611 }
1612
1613 static inline void hlist_add_after(struct hlist_node *n,
1614 struct hlist_node *next)
1615 {
1616 next->next = n->next;
1617 n->next = next;
1618 next->pprev = &n->next;
1619
1620 if(next->next)
1621 next->next->pprev = &next->next;
1622 }
1623
1624 /*
1625 * Move a list from one list head to another. Fixup the pprev
1626 * reference of the first entry if it exists.
1627 */
1628 static inline void hlist_move_list(struct hlist_head *old,
1629 struct hlist_head *new)
1630 {
1631 new->first = old->first;
1632 if (new->first)
1633 new->first->pprev = &new->first;
1634 old->first = NULL;
1635 }
1636
1637 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
1638
1639 #define hlist_for_each(pos, head) \
1640 for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
1641 pos = pos->next)
1642
1643 #define hlist_for_each_safe(pos, n, head) \
1644 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
1645 pos = n)
1646
1647 /**
1648 * hlist_for_each_entry - iterate over list of given type
1649 * @tpos: the type * to use as a loop cursor.
1650 * @pos: the &struct hlist_node to use as a loop cursor.
1651 * @head: the head for your list.
1652 * @member: the name of the hlist_node within the struct.
1653 */
1654 #define hlist_for_each_entry(tpos, pos, head, member) \
1655 for (pos = (head)->first; \
1656 pos && ({ prefetch(pos->next); 1;}) && \
1657 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
1658 pos = pos->next)
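/*
 * Illustrative sketch (not part of the generated header): a tiny chained
 * hash table using hlist heads as buckets, the use case called out in the
 * comment above.  All names here are invented for this example only.
 *
 *	#define TABLE_SIZE 64
 *
 *	struct entry {
 *		unsigned long key;
 *		struct hlist_node hash;
 *	};
 *
 *	static struct hlist_head table[TABLE_SIZE];	// zero-filled == empty
 *
 *	static void table_insert(struct entry *e)
 *	{
 *		hlist_add_head(&e->hash, &table[e->key % TABLE_SIZE]);
 *	}
 *
 *	static struct entry *table_lookup(unsigned long key)
 *	{
 *		struct entry *e;
 *		struct hlist_node *pos;
 *
 *		hlist_for_each_entry(e, pos, &table[key % TABLE_SIZE], hash)
 *			if (e->key == key)
 *				return e;
 *		return NULL;
 *	}
 */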
1659
1660 /**
1661 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
1662 * @tpos: the type * to use as a loop cursor.
1663 * @pos: the &struct hlist_node to use as a loop cursor.
1664 * @member: the name of the hlist_node within the struct.
1665 */
1666 #define hlist_for_each_entry_continue(tpos, pos, member) \
1667 for (pos = (pos)->next; \
1668 pos && ({ prefetch(pos->next); 1;}) && \
1669 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
1670 pos = pos->next)
1671
1672 /**
1673 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
1674 * @tpos: the type * to use as a loop cursor.
1675 * @pos: the &struct hlist_node to use as a loop cursor.
1676 * @member: the name of the hlist_node within the struct.
1677 */
1678 #define hlist_for_each_entry_from(tpos, pos, member) \
1679 for (; pos && ({ prefetch(pos->next); 1;}) && \
1680 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
1681 pos = pos->next)
1682
1683 /**
1684 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
1685 * @tpos: the type * to use as a loop cursor.
1686 * @pos: the &struct hlist_node to use as a loop cursor.
1687 * @n: another &struct hlist_node to use as temporary storage
1688 * @head: the head for your list.
1689 * @member: the name of the hlist_node within the struct.
1690 */
1691 #define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
1692 for (pos = (head)->first; \
1693 pos && ({ n = pos->next; 1; }) && \
1694 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
1695 pos = n)
1696
1697 #endif