/* MECHANICALLY GENERATED, DO NOT EDIT!!! */

#define _INCLUDE_API_H

/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */

#ifndef __always_inline
#define __always_inline inline
#endif

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)

#ifdef __ASSEMBLY__
#  define stringify_in_c(...)	__VA_ARGS__
#  define ASM_CONST(x)		x
#else
/* This version of stringify will deal with commas... */
#  define __stringify_in_c(...)	#__VA_ARGS__
#  define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
#  define __ASM_CONST(x)	x##UL
#  define ASM_CONST(x)		__ASM_CONST(x)
#endif
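
/*
 * Illustrative sketch (not part of the original header): BUILD_BUG_ON()
 * maps a true (i.e., violated) condition to a negative array size, so the
 * offending translation unit fails to compile; stringify_in_c() turns its
 * arguments, commas and all, into a C string literal.
 */
static __inline__ void __api_compile_time_checks(void)
{
	const char *s = stringify_in_c(lock ; addl $0, 0);

	BUILD_BUG_ON(sizeof(long) < sizeof(int));	/* compiles: condition false. */
	(void)s;
}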
45 | ||
46 | ||
47 | /* | |
48 | * arch-i386.h: Expose x86 atomic instructions. 80486 and better only. | |
49 | * | |
50 | * This program is free software; you can redistribute it and/or modify | |
51 | * it under the terms of the GNU General Public License as published by | |
52 | * the Free Software Foundation, but version 2 only due to inclusion | |
53 | * of Linux-kernel code. | |
54 | * | |
55 | * This program is distributed in the hope that it will be useful, | |
56 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
57 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
58 | * GNU General Public License for more details. | |
59 | * | |
60 | * You should have received a copy of the GNU General Public License | |
61 | * along with this program; if not, write to the Free Software | |
62 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
63 | * | |
64 | * Copyright (c) 2006 Paul E. McKenney, IBM. | |
65 | * | |
66 | * Much code taken from the Linux kernel. For such code, the option | |
67 | * to redistribute under later versions of GPL might not be available. | |
68 | */ | |
69 | ||
70 | /* | |
71 | * Machine parameters. | |
72 | */ | |
73 | ||
74 | #define CONFIG_SMP | |
75 | ||
76 | #define CACHE_LINE_SIZE 64 | |
77 | #define ____cacheline_internodealigned_in_smp \ | |
78 | __attribute__((__aligned__(1 << 6))) | |
79 | ||
80 | #define LOCK_PREFIX "lock ; " | |
81 | ||
82 | /* | |
83 | * Atomic data structure, initialization, and access. | |
84 | */ | |
85 | ||
86 | typedef struct { volatile int counter; } atomic_t; | |
87 | ||
88 | #define ATOMIC_INIT(i) { (i) } | |
89 | ||
90 | #define atomic_read(v) ((v)->counter) | |
91 | #define atomic_set(v, i) (((v)->counter) = (i)) | |
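
/*
 * Usage sketch (hypothetical "__demo_counter", illustrative only): an
 * atomic_t is declared and statically initialized, then accessed through
 * the macros above; atomic_read() and atomic_set() are plain unordered
 * loads and stores of the underlying counter.
 */
static atomic_t __demo_counter = ATOMIC_INIT(0);

static __inline__ int demo_counter_snapshot(void)
{
	return atomic_read(&__demo_counter);
}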
92 | ||
93 | /* | |
94 | * Atomic operations. | |
95 | */ | |
96 | ||
97 | /** | |
98 | * atomic_add - add integer to atomic variable | |
99 | * @i: integer value to add | |
100 | * @v: pointer of type atomic_t | |
101 | * | |
102 | * Atomically adds @i to @v. | |
103 | */ | |
104 | static __inline__ void atomic_add(int i, atomic_t *v) | |
105 | { | |
106 | __asm__ __volatile__( | |
107 | LOCK_PREFIX "addl %1,%0" | |
108 | :"+m" (v->counter) | |
109 | :"ir" (i)); | |
110 | } | |
111 | ||
112 | /** | |
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}
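
/*
 * Usage sketch: the classic reference-counting idiom built on
 * atomic_dec_and_test(); "struct demo_ref" is hypothetical.  Exactly one
 * thread sees the transition to zero and gets to reclaim the object.
 */
struct demo_ref {
	atomic_t refcnt;
};

static __inline__ int demo_ref_put(struct demo_ref *p)
{
	return atomic_dec_and_test(&p->refcnt); /* non-zero: caller reclaims p. */
}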
191 | ||
192 | /** | |
193 | * atomic_inc_and_test - increment and test | |
194 | * @v: pointer of type atomic_t | |
195 | * | |
196 | * Atomically increments @v by 1 | |
197 | * and returns true if the result is zero, or false for all | |
198 | * other cases. | |
199 | */ | |
200 | static __inline__ int atomic_inc_and_test(atomic_t *v) | |
201 | { | |
202 | unsigned char c; | |
203 | ||
204 | __asm__ __volatile__( | |
205 | LOCK_PREFIX "incl %0; sete %1" | |
206 | :"+m" (v->counter), "=qm" (c) | |
207 | : : "memory"); | |
208 | return c != 0; | |
209 | } | |
210 | ||
211 | /** | |
212 | * atomic_add_negative - add and test if negative | |
213 | * @v: pointer of type atomic_t | |
214 | * @i: integer value to add | |
215 | * | |
216 | * Atomically adds @i to @v and returns true | |
217 | * if the result is negative, or false when | |
218 | * result is greater than or equal to zero. | |
219 | */ | |
220 | static __inline__ int atomic_add_negative(int i, atomic_t *v) | |
221 | { | |
222 | unsigned char c; | |
223 | ||
224 | __asm__ __volatile__( | |
225 | LOCK_PREFIX "addl %2,%0; sets %1" | |
226 | :"+m" (v->counter), "=qm" (c) | |
227 | :"ir" (i) : "memory"); | |
228 | return c; | |
229 | } | |
230 | ||
231 | /** | |
232 | * atomic_add_return - add and return | |
233 | * @v: pointer of type atomic_t | |
234 | * @i: integer value to add | |
235 | * | |
236 | * Atomically adds @i to @v and returns @i + @v | |
237 | */ | |
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;

	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1"
		:"+r" (i), "+m" (v->counter)
		: : "memory");
	return i + __i;
}
249 | ||
250 | static __inline__ int atomic_sub_return(int i, atomic_t *v) | |
251 | { | |
252 | return atomic_add_return(-i,v); | |
253 | } | |
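
/*
 * Usage sketch (hypothetical helper): xadd-based fetch-and-add makes a
 * one-line ticket dispenser; each caller receives a distinct ticket.
 */
static __inline__ int demo_get_ticket(atomic_t *next_ticket)
{
	return atomic_add_return(1, next_ticket) - 1;	/* previous value. */
}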
254 | ||
255 | static inline unsigned int | |
256 | cmpxchg(volatile long *ptr, long oldval, long newval) | |
257 | { | |
258 | unsigned long retval; | |
259 | ||
260 | asm("# cmpxchg\n" | |
261 | "lock; cmpxchgl %4,(%2)\n" | |
262 | "# end atomic_cmpxchg4" | |
263 | : "=a" (retval), "=m" (*ptr) | |
264 | : "r" (ptr), "0" (oldval), "r" (newval), "m" (*ptr) | |
265 | : "cc"); | |
266 | return (retval); | |
267 | } | |
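
/*
 * Note: atomic_xchg() below expands to xchg(), which this mechanically
 * generated header never defines.  A minimal i386 sketch in the style of
 * cmpxchg() above; xchgl with a memory operand locks the bus implicitly,
 * so no lock prefix is needed.
 */
static inline unsigned long
xchg(volatile long *ptr, long newval)
{
	unsigned long retval;

	asm volatile("xchgl %0,(%1)"
		     : "=r" (retval)
		     : "r" (ptr), "0" (newval)
		     : "memory");
	return retval;
}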
268 | ||
269 | #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) | |
270 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | |
271 | ||
272 | /** | |
273 | * atomic_add_unless - add unless the number is a given value | |
274 | * @v: pointer of type atomic_t | |
275 | * @a: the amount to add to v... | |
276 | * @u: ...unless v is equal to u. | |
277 | * | |
278 | * Atomically adds @a to @v, so long as it was not @u. | |
279 | * Returns non-zero if @v was not @u, and zero otherwise. | |
280 | */ | |
281 | #define atomic_add_unless(v, a, u) \ | |
282 | ({ \ | |
283 | int c, old; \ | |
284 | c = atomic_read(v); \ | |
285 | for (;;) { \ | |
286 | if (unlikely(c == (u))) \ | |
287 | break; \ | |
288 | old = atomic_cmpxchg((v), c, c + (a)); \ | |
289 | if (likely(old == c)) \ | |
290 | break; \ | |
291 | c = old; \ | |
292 | } \ | |
293 | c != (u); \ | |
294 | }) | |
295 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | |
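
/*
 * Usage sketch: atomic_inc_not_zero() gates reference acquisition against
 * concurrent teardown; once the count has dropped to zero, no new
 * reference can be obtained ("struct demo_ref" is from the sketch above).
 */
static __inline__ int demo_ref_get(struct demo_ref *p)
{
	return atomic_inc_not_zero(&p->refcnt);	/* zero: object already dying. */
}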
296 | ||
297 | #define atomic_inc_return(v) (atomic_add_return(1,v)) | |
298 | #define atomic_dec_return(v) (atomic_sub_return(1,v)) | |
299 | ||
300 | /* These are x86-specific, used by some header files */ | |
301 | #define atomic_clear_mask(mask, addr) \ | |
302 | __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ | |
303 | : : "r" (~(mask)),"m" (*addr) : "memory") | |
304 | ||
305 | #define atomic_set_mask(mask, addr) \ | |
306 | __asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ | |
307 | : : "r" (mask),"m" (*(addr)) : "memory") | |
308 | ||
309 | /* Atomic operations are already serializing on x86 */ | |
310 | #define smp_mb__before_atomic_dec() barrier() | |
311 | #define smp_mb__after_atomic_dec() barrier() | |
312 | #define smp_mb__before_atomic_inc() barrier() | |
313 | #define smp_mb__after_atomic_inc() barrier() | |
314 | ||
315 | #define smp_mb() \ | |
316 | __asm__ __volatile__("mfence" : : : "memory") | |
317 | /* __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory") */ | |
318 | ||
319 | ||
320 | /* | |
321 | * Generate 64-bit timestamp. | |
322 | */ | |
323 | ||
324 | static unsigned long long get_timestamp(void) | |
325 | { | |
326 | unsigned int __a,__d; | |
327 | ||
328 | __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); | |
329 | return ((long long)__a) | (((long long)__d)<<32); | |
330 | } | |
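
/*
 * Usage sketch (hypothetical helper): timing a code region in TSC
 * cycles.  Only approximate -- rdtsc is not a serializing instruction.
 */
static __inline__ unsigned long long demo_time_region(void (*fn)(void))
{
	unsigned long long start = get_timestamp();

	fn();
	return get_timestamp() - start;
}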
331 | ||
332 | /* | |
333 | * api_pthreads.h: API mapping to pthreads environment. | |
334 | * | |
335 | * This program is free software; you can redistribute it and/or modify | |
336 | * it under the terms of the GNU General Public License as published by | |
337 | * the Free Software Foundation; either version 2 of the License, or | |
338 | * (at your option) any later version. However, please note that much | |
339 | * of the code in this file derives from the Linux kernel, and that such | |
340 | * code may not be available except under GPLv2. | |
341 | * | |
342 | * This program is distributed in the hope that it will be useful, | |
343 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
344 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
345 | * GNU General Public License for more details. | |
346 | * | |
347 | * You should have received a copy of the GNU General Public License | |
348 | * along with this program; if not, write to the Free Software | |
349 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
350 | * | |
351 | * Copyright (c) 2006 Paul E. McKenney, IBM. | |
352 | */ | |
353 | ||
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/time.h>	/* for gettimeofday(), used by get_microseconds(). */
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */
364 | ||
365 | /* | |
366 | * Compiler magic. | |
367 | */ | |
368 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) | |
369 | #define container_of(ptr, type, member) ({ \ | |
370 | const typeof( ((type *)0)->member ) *__mptr = (ptr); \ | |
371 | (type *)( (char *)__mptr - offsetof(type,member) );}) | |
372 | #define barrier() __asm__ __volatile__("": : :"memory") | |
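
/*
 * Usage sketch: container_of() recovers the enclosing structure from a
 * pointer to one of its members ("struct demo_pair" is hypothetical).
 */
struct demo_pair {
	int key;
	int value;
};

static inline struct demo_pair *demo_pair_from_value(int *valuep)
{
	return container_of(valuep, struct demo_pair, value);
}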
373 | ||
374 | /* | |
375 | * Default machine parameters. | |
376 | */ | |
377 | ||
378 | #ifndef CACHE_LINE_SIZE | |
379 | #define CACHE_LINE_SIZE 128 | |
380 | #endif /* #ifndef CACHE_LINE_SIZE */ | |
381 | ||
382 | /* | |
383 | * Exclusive locking primitives. | |
384 | */ | |
385 | ||
386 | typedef pthread_mutex_t spinlock_t; | |
387 | ||
388 | #define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER; | |
389 | #define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER | |
390 | ||
391 | static void spin_lock_init(spinlock_t *sp) | |
392 | { | |
393 | if (pthread_mutex_init(sp, NULL) != 0) { | |
394 | perror("spin_lock_init:pthread_mutex_init"); | |
395 | exit(-1); | |
396 | } | |
397 | } | |
398 | ||
399 | static void spin_lock(spinlock_t *sp) | |
400 | { | |
401 | if (pthread_mutex_lock(sp) != 0) { | |
402 | perror("spin_lock:pthread_mutex_lock"); | |
403 | exit(-1); | |
404 | } | |
405 | } | |
406 | ||
407 | static int spin_trylock(spinlock_t *sp) | |
408 | { | |
409 | int retval; | |
410 | ||
411 | if ((retval = pthread_mutex_trylock(sp)) == 0) | |
412 | return 1; | |
413 | if (retval == EBUSY) | |
414 | return 0; | |
415 | perror("spin_trylock:pthread_mutex_trylock"); | |
416 | exit(-1); | |
417 | } | |
418 | ||
419 | static void spin_unlock(spinlock_t *sp) | |
420 | { | |
421 | if (pthread_mutex_unlock(sp) != 0) { | |
422 | perror("spin_unlock:pthread_mutex_unlock"); | |
423 | exit(-1); | |
424 | } | |
425 | } | |
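
/*
 * Usage sketch (hypothetical counter): a global protected by the
 * pthread-backed spinlock API above.  DEFINE_SPINLOCK() supplies its
 * own trailing semicolon.
 */
DEFINE_SPINLOCK(__demo_lock)
static unsigned long __demo_count;

static __inline__ void demo_count_inc(void)
{
	spin_lock(&__demo_lock);
	__demo_count++;
	spin_unlock(&__demo_lock);
}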
426 | ||
427 | #define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0) | |
428 | #define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0) | |
429 | ||
430 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) | |
431 | #define unlikely(x) x | |
432 | #define likely(x) x | |
433 | #define prefetch(x) x | |
434 | ||
435 | /* | |
436 | * Thread creation/destruction primitives. | |
437 | */ | |
438 | ||
439 | typedef pthread_t thread_id_t; | |
440 | ||
441 | #define NR_THREADS 128 | |
442 | ||
443 | #define __THREAD_ID_MAP_EMPTY 0 | |
444 | #define __THREAD_ID_MAP_WAITING 1 | |
445 | thread_id_t __thread_id_map[NR_THREADS]; | |
446 | spinlock_t __thread_id_map_mutex; | |
447 | ||
448 | #define for_each_thread(t) \ | |
449 | for (t = 0; t < NR_THREADS; t++) | |
450 | ||
451 | #define for_each_running_thread(t) \ | |
452 | for (t = 0; t < NR_THREADS; t++) \ | |
453 | if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \ | |
454 | (__thread_id_map[t] != __THREAD_ID_MAP_WAITING)) | |
455 | ||
456 | pthread_key_t thread_id_key; | |
457 | ||
static int __smp_thread_id(void)
{
	int i;
	thread_id_t tid = pthread_self();

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			long v = i + 1;  /* must be non-NULL. */

			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
				perror("pthread_setspecific");
				exit(-1);
			}
			return i;
		}
	}
	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			spin_unlock(&__thread_id_map_mutex);
			return i;
		}
	}
	spin_unlock(&__thread_id_map_mutex);
	fprintf(stderr, "smp_thread_id: Rogue thread, id: %lu(%#lx)\n",
		(unsigned long)tid, (unsigned long)tid);
	exit(-1);
}
484 | ||
485 | static int smp_thread_id(void) | |
486 | { | |
487 | void *id; | |
488 | ||
489 | id = pthread_getspecific(thread_id_key); | |
490 | if (id == NULL) | |
491 | return __smp_thread_id(); | |
492 | return (long)(id - 1); | |
493 | } | |
494 | ||
495 | static thread_id_t create_thread(void *(*func)(void *), void *arg) | |
496 | { | |
497 | thread_id_t tid; | |
498 | int i; | |
499 | ||
500 | spin_lock(&__thread_id_map_mutex); | |
501 | for (i = 0; i < NR_THREADS; i++) { | |
502 | if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY) | |
503 | break; | |
504 | } | |
505 | if (i >= NR_THREADS) { | |
506 | spin_unlock(&__thread_id_map_mutex); | |
507 | fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS); | |
508 | exit(-1); | |
509 | } | |
510 | __thread_id_map[i] = __THREAD_ID_MAP_WAITING; | |
511 | spin_unlock(&__thread_id_map_mutex); | |
512 | if (pthread_create(&tid, NULL, func, arg) != 0) { | |
513 | perror("create_thread:pthread_create"); | |
514 | exit(-1); | |
515 | } | |
516 | __thread_id_map[i] = tid; | |
517 | return tid; | |
518 | } | |
519 | ||
520 | static void *wait_thread(thread_id_t tid) | |
521 | { | |
522 | int i; | |
523 | void *vp; | |
524 | ||
525 | for (i = 0; i < NR_THREADS; i++) { | |
526 | if (__thread_id_map[i] == tid) | |
527 | break; | |
528 | } | |
	if (i >= NR_THREADS) {
		fprintf(stderr, "wait_thread: bad tid = %lu(%#lx)\n",
			(unsigned long)tid, (unsigned long)tid);
		exit(-1);
	}
	if (pthread_join(tid, &vp) != 0) {
		perror("wait_thread:pthread_join");
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	return vp;
}

static void wait_all_threads(void)
{
	int i;
	thread_id_t tid;

	for (i = 1; i < NR_THREADS; i++) {
		tid = __thread_id_map[i];
		if (tid != __THREAD_ID_MAP_EMPTY &&
		    tid != __THREAD_ID_MAP_WAITING)
			(void)wait_thread(tid);
	}
}
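
/*
 * Usage sketch: the expected thread lifecycle.  smp_init() (defined
 * below) must already have run; demo_worker() is hypothetical.
 */
static void *demo_worker(void *arg)
{
	printf("hello from thread %d\n", smp_thread_id());
	return NULL;
}

static __inline__ void demo_run_threads(void)
{
	create_thread(demo_worker, NULL);	/* assumes smp_init() was called. */
	wait_all_threads();
}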
553 | ||
554 | static void run_on(int cpu) | |
555 | { | |
556 | cpu_set_t mask; | |
557 | ||
558 | CPU_ZERO(&mask); | |
559 | CPU_SET(cpu, &mask); | |
560 | sched_setaffinity(0, sizeof(mask), &mask); | |
561 | } | |
562 | ||
563 | /* | |
564 | * timekeeping -- very crude -- should use MONOTONIC... | |
565 | */ | |
566 | ||
static long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}

/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)
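
/*
 * Usage sketch: a split statistical counter.  Each thread increments its
 * own cache-aligned slot, so updates never contend; reads sum all slots
 * and are only approximate while updates are in flight.
 */
DEFINE_PER_THREAD(long, demo_stat_count);

static __inline__ void demo_stat_inc(void)
{
	__get_thread_var(demo_stat_count)++;
}

static __inline__ long demo_stat_read(void)
{
	int t;
	long sum = 0;

	for_each_thread(t)
		sum += per_thread(demo_stat_count, t);
	return sum;
}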
596 | ||
597 | /* | |
598 | * CPU traversal primitives. | |
599 | */ | |
600 | ||
601 | #ifndef NR_CPUS | |
602 | #define NR_CPUS 16 | |
603 | #endif /* #ifndef NR_CPUS */ | |
604 | ||
605 | #define for_each_possible_cpu(cpu) \ | |
606 | for (cpu = 0; cpu < NR_CPUS; cpu++) | |
607 | #define for_each_online_cpu(cpu) \ | |
608 | for (cpu = 0; cpu < NR_CPUS; cpu++) | |
609 | ||
610 | /* | |
611 | * Per-CPU variables. | |
612 | */ | |
613 | ||
614 | #define DEFINE_PER_CPU(type, name) \ | |
615 | struct { \ | |
616 | __typeof__(type) v \ | |
617 | __attribute__((__aligned__(CACHE_LINE_SIZE))); \ | |
618 | } __per_cpu_##name[NR_CPUS] | |
619 | #define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name) | |
620 | ||
621 | DEFINE_PER_THREAD(int, smp_processor_id); | |
622 | ||
623 | static int smp_processor_id(void) | |
624 | { | |
625 | return __get_thread_var(smp_processor_id); | |
626 | } | |
627 | ||
628 | static void set_smp_processor_id(int cpu) | |
629 | { | |
630 | __get_thread_var(smp_processor_id) = cpu; | |
631 | } | |
632 | ||
633 | #define per_cpu(name, thread) __per_cpu_##name[thread].v | |
634 | #define __get_cpu_var(name) per_cpu(name, smp_processor_id()) | |
635 | ||
636 | #define init_per_cpu(name, v) \ | |
637 | do { \ | |
638 | int __i_p_c_i; \ | |
639 | for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \ | |
640 | per_cpu(name, __i_p_c_i) = v; \ | |
641 | } while (0) | |
642 | ||
643 | /* | |
644 | * CPU state checking (crowbarred). | |
645 | */ | |
646 | ||
647 | #define idle_cpu(cpu) 0 | |
648 | #define in_softirq() 1 | |
649 | #define hardirq_count() 0 | |
650 | #define PREEMPT_SHIFT 0 | |
651 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) | |
652 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) | |
653 | #define PREEMPT_BITS 8 | |
654 | #define SOFTIRQ_BITS 8 | |
655 | ||
656 | /* | |
657 | * CPU hotplug. | |
658 | */ | |
659 | ||
660 | struct notifier_block { | |
661 | int (*notifier_call)(struct notifier_block *, unsigned long, void *); | |
662 | struct notifier_block *next; | |
663 | int priority; | |
664 | }; | |
665 | ||
666 | #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ | |
667 | #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ | |
668 | #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ | |
669 | #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ | |
670 | #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ | |
671 | #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ | |
672 | #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, | |
673 | * not handling interrupts, soon dead */ | |
674 | #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug | |
675 | * lock is dropped */ | |
676 | ||
/* Used for CPU hotplug events occurring while tasks are frozen due to a
 * suspend operation in progress.
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

#define NOTIFY_DONE		0x0000 /* Don't care */
#define NOTIFY_OK		0x0001 /* Suits me */
#define NOTIFY_STOP_MASK	0x8000 /* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK | 0x0002)
					/* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP		(NOTIFY_OK | NOTIFY_STOP_MASK)

/*
 * Bug checks.
 */

#define BUG_ON(c) do { if (c) abort(); } while (0)
713 | ||
714 | /* | |
715 | * Initialization -- Must be called before calling any primitives. | |
716 | */ | |
717 | ||
718 | static void smp_init(void) | |
719 | { | |
720 | int i; | |
721 | ||
722 | spin_lock_init(&__thread_id_map_mutex); | |
723 | __thread_id_map[0] = pthread_self(); | |
724 | for (i = 1; i < NR_THREADS; i++) | |
725 | __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; | |
726 | init_per_thread(smp_processor_id, 0); | |
727 | if (pthread_key_create(&thread_id_key, NULL) != 0) { | |
728 | perror("pthread_key_create"); | |
729 | exit(-1); | |
730 | } | |
731 | } | |
732 | ||
733 | /* Taken from the Linux kernel source tree, so GPLv2-only!!! */ | |
734 | ||
735 | #ifndef _LINUX_LIST_H | |
736 | #define _LINUX_LIST_H | |
737 | ||
738 | #define LIST_POISON1 ((void *) 0x00100100) | |
739 | #define LIST_POISON2 ((void *) 0x00200200) | |
740 | ||
741 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) | |
742 | #define container_of(ptr, type, member) ({ \ | |
743 | const typeof( ((type *)0)->member ) *__mptr = (ptr); \ | |
744 | (type *)( (char *)__mptr - offsetof(type,member) );}) | |
745 | ||
746 | /* | |
747 | * Simple doubly linked list implementation. | |
748 | * | |
749 | * Some of the internal functions ("__xxx") are useful when | |
750 | * manipulating whole lists rather than single entries, as | |
751 | * sometimes we already know the next/prev entries and we can | |
752 | * generate better code by using them directly rather than | |
753 | * using the generic single-entry routines. | |
754 | */ | |
755 | ||
756 | struct list_head { | |
757 | struct list_head *next, *prev; | |
758 | }; | |
759 | ||
760 | #define LIST_HEAD_INIT(name) { &(name), &(name) } | |
761 | ||
762 | #define LIST_HEAD(name) \ | |
763 | struct list_head name = LIST_HEAD_INIT(name) | |
764 | ||
765 | static inline void INIT_LIST_HEAD(struct list_head *list) | |
766 | { | |
767 | list->next = list; | |
768 | list->prev = list; | |
769 | } | |
770 | ||
771 | /* | |
772 | * Insert a new entry between two known consecutive entries. | |
773 | * | |
774 | * This is only for internal list manipulation where we know | |
775 | * the prev/next entries already! | |
776 | */ | |
777 | #ifndef CONFIG_DEBUG_LIST | |
778 | static inline void __list_add(struct list_head *new, | |
779 | struct list_head *prev, | |
780 | struct list_head *next) | |
781 | { | |
782 | next->prev = new; | |
783 | new->next = next; | |
784 | new->prev = prev; | |
785 | prev->next = new; | |
786 | } | |
787 | #else | |
788 | extern void __list_add(struct list_head *new, | |
789 | struct list_head *prev, | |
790 | struct list_head *next); | |
791 | #endif | |
792 | ||
793 | /** | |
794 | * list_add - add a new entry | |
795 | * @new: new entry to be added | |
796 | * @head: list head to add it after | |
797 | * | |
798 | * Insert a new entry after the specified head. | |
799 | * This is good for implementing stacks. | |
800 | */ | |
801 | static inline void list_add(struct list_head *new, struct list_head *head) | |
802 | { | |
803 | __list_add(new, head, head->next); | |
804 | } | |
805 | ||
806 | ||
807 | /** | |
808 | * list_add_tail - add a new entry | |
809 | * @new: new entry to be added | |
810 | * @head: list head to add it before | |
811 | * | |
812 | * Insert a new entry before the specified head. | |
813 | * This is useful for implementing queues. | |
814 | */ | |
815 | static inline void list_add_tail(struct list_head *new, struct list_head *head) | |
816 | { | |
817 | __list_add(new, head->prev, head); | |
818 | } | |
819 | ||
820 | /* | |
821 | * Delete a list entry by making the prev/next entries | |
822 | * point to each other. | |
823 | * | |
824 | * This is only for internal list manipulation where we know | |
825 | * the prev/next entries already! | |
826 | */ | |
827 | static inline void __list_del(struct list_head * prev, struct list_head * next) | |
828 | { | |
829 | next->prev = prev; | |
830 | prev->next = next; | |
831 | } | |
832 | ||
833 | /** | |
834 | * list_del - deletes entry from list. | |
835 | * @entry: the element to delete from the list. | |
836 | * Note: list_empty() on entry does not return true after this, the entry is | |
837 | * in an undefined state. | |
838 | */ | |
839 | #ifndef CONFIG_DEBUG_LIST | |
840 | static inline void list_del(struct list_head *entry) | |
841 | { | |
842 | __list_del(entry->prev, entry->next); | |
843 | entry->next = LIST_POISON1; | |
844 | entry->prev = LIST_POISON2; | |
845 | } | |
846 | #else | |
847 | extern void list_del(struct list_head *entry); | |
848 | #endif | |
849 | ||
850 | /** | |
851 | * list_replace - replace old entry by new one | |
852 | * @old : the element to be replaced | |
853 | * @new : the new element to insert | |
854 | * | |
855 | * If @old was empty, it will be overwritten. | |
856 | */ | |
857 | static inline void list_replace(struct list_head *old, | |
858 | struct list_head *new) | |
859 | { | |
860 | new->next = old->next; | |
861 | new->next->prev = new; | |
862 | new->prev = old->prev; | |
863 | new->prev->next = new; | |
864 | } | |
865 | ||
866 | static inline void list_replace_init(struct list_head *old, | |
867 | struct list_head *new) | |
868 | { | |
869 | list_replace(old, new); | |
870 | INIT_LIST_HEAD(old); | |
871 | } | |
872 | ||
873 | /** | |
874 | * list_del_init - deletes entry from list and reinitialize it. | |
875 | * @entry: the element to delete from the list. | |
876 | */ | |
877 | static inline void list_del_init(struct list_head *entry) | |
878 | { | |
879 | __list_del(entry->prev, entry->next); | |
880 | INIT_LIST_HEAD(entry); | |
881 | } | |
882 | ||
883 | /** | |
884 | * list_move - delete from one list and add as another's head | |
885 | * @list: the entry to move | |
886 | * @head: the head that will precede our entry | |
887 | */ | |
888 | static inline void list_move(struct list_head *list, struct list_head *head) | |
889 | { | |
890 | __list_del(list->prev, list->next); | |
891 | list_add(list, head); | |
892 | } | |
893 | ||
894 | /** | |
895 | * list_move_tail - delete from one list and add as another's tail | |
896 | * @list: the entry to move | |
897 | * @head: the head that will follow our entry | |
898 | */ | |
899 | static inline void list_move_tail(struct list_head *list, | |
900 | struct list_head *head) | |
901 | { | |
902 | __list_del(list->prev, list->next); | |
903 | list_add_tail(list, head); | |
904 | } | |
905 | ||
906 | /** | |
907 | * list_is_last - tests whether @list is the last entry in list @head | |
908 | * @list: the entry to test | |
909 | * @head: the head of the list | |
910 | */ | |
911 | static inline int list_is_last(const struct list_head *list, | |
912 | const struct list_head *head) | |
913 | { | |
914 | return list->next == head; | |
915 | } | |
916 | ||
917 | /** | |
918 | * list_empty - tests whether a list is empty | |
919 | * @head: the list to test. | |
920 | */ | |
921 | static inline int list_empty(const struct list_head *head) | |
922 | { | |
923 | return head->next == head; | |
924 | } | |
925 | ||
926 | /** | |
927 | * list_empty_careful - tests whether a list is empty and not being modified | |
928 | * @head: the list to test | |
929 | * | |
930 | * Description: | |
931 | * tests whether a list is empty _and_ checks that no other CPU might be | |
932 | * in the process of modifying either member (next or prev) | |
933 | * | |
934 | * NOTE: using list_empty_careful() without synchronization | |
935 | * can only be safe if the only activity that can happen | |
936 | * to the list entry is list_del_init(). Eg. it cannot be used | |
937 | * if another CPU could re-list_add() it. | |
938 | */ | |
939 | static inline int list_empty_careful(const struct list_head *head) | |
940 | { | |
941 | struct list_head *next = head->next; | |
942 | return (next == head) && (next == head->prev); | |
943 | } | |
944 | ||
945 | /** | |
946 | * list_is_singular - tests whether a list has just one entry. | |
947 | * @head: the list to test. | |
948 | */ | |
949 | static inline int list_is_singular(const struct list_head *head) | |
950 | { | |
951 | return !list_empty(head) && (head->next == head->prev); | |
952 | } | |
953 | ||
954 | static inline void __list_cut_position(struct list_head *list, | |
955 | struct list_head *head, struct list_head *entry) | |
956 | { | |
957 | struct list_head *new_first = entry->next; | |
958 | list->next = head->next; | |
959 | list->next->prev = list; | |
960 | list->prev = entry; | |
961 | entry->next = list; | |
962 | head->next = new_first; | |
963 | new_first->prev = head; | |
964 | } | |
965 | ||
966 | /** | |
967 | * list_cut_position - cut a list into two | |
968 | * @list: a new list to add all removed entries | |
969 | * @head: a list with entries | |
970 | * @entry: an entry within head, could be the head itself | |
971 | * and if so we won't cut the list | |
972 | * | |
973 | * This helper moves the initial part of @head, up to and | |
974 | * including @entry, from @head to @list. You should | |
975 | * pass on @entry an element you know is on @head. @list | |
976 | * should be an empty list or a list you do not care about | |
977 | * losing its data. | |
978 | * | |
979 | */ | |
980 | static inline void list_cut_position(struct list_head *list, | |
981 | struct list_head *head, struct list_head *entry) | |
982 | { | |
983 | if (list_empty(head)) | |
984 | return; | |
985 | if (list_is_singular(head) && | |
986 | (head->next != entry && head != entry)) | |
987 | return; | |
988 | if (entry == head) | |
989 | INIT_LIST_HEAD(list); | |
990 | else | |
991 | __list_cut_position(list, head, entry); | |
992 | } | |
993 | ||
994 | static inline void __list_splice(const struct list_head *list, | |
995 | struct list_head *prev, | |
996 | struct list_head *next) | |
997 | { | |
998 | struct list_head *first = list->next; | |
999 | struct list_head *last = list->prev; | |
1000 | ||
1001 | first->prev = prev; | |
1002 | prev->next = first; | |
1003 | ||
1004 | last->next = next; | |
1005 | next->prev = last; | |
1006 | } | |
1007 | ||
1008 | /** | |
1009 | * list_splice - join two lists, this is designed for stacks | |
1010 | * @list: the new list to add. | |
1011 | * @head: the place to add it in the first list. | |
1012 | */ | |
1013 | static inline void list_splice(const struct list_head *list, | |
1014 | struct list_head *head) | |
1015 | { | |
1016 | if (!list_empty(list)) | |
1017 | __list_splice(list, head, head->next); | |
1018 | } | |
1019 | ||
1020 | /** | |
1021 | * list_splice_tail - join two lists, each list being a queue | |
1022 | * @list: the new list to add. | |
1023 | * @head: the place to add it in the first list. | |
1024 | */ | |
1025 | static inline void list_splice_tail(struct list_head *list, | |
1026 | struct list_head *head) | |
1027 | { | |
1028 | if (!list_empty(list)) | |
1029 | __list_splice(list, head->prev, head); | |
1030 | } | |
1031 | ||
1032 | /** | |
1033 | * list_splice_init - join two lists and reinitialise the emptied list. | |
1034 | * @list: the new list to add. | |
1035 | * @head: the place to add it in the first list. | |
1036 | * | |
1037 | * The list at @list is reinitialised | |
1038 | */ | |
1039 | static inline void list_splice_init(struct list_head *list, | |
1040 | struct list_head *head) | |
1041 | { | |
1042 | if (!list_empty(list)) { | |
1043 | __list_splice(list, head, head->next); | |
1044 | INIT_LIST_HEAD(list); | |
1045 | } | |
1046 | } | |
1047 | ||
1048 | /** | |
1049 | * list_splice_tail_init - join two lists and reinitialise the emptied list | |
1050 | * @list: the new list to add. | |
1051 | * @head: the place to add it in the first list. | |
1052 | * | |
1053 | * Each of the lists is a queue. | |
1054 | * The list at @list is reinitialised | |
1055 | */ | |
1056 | static inline void list_splice_tail_init(struct list_head *list, | |
1057 | struct list_head *head) | |
1058 | { | |
1059 | if (!list_empty(list)) { | |
1060 | __list_splice(list, head->prev, head); | |
1061 | INIT_LIST_HEAD(list); | |
1062 | } | |
1063 | } | |
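
/*
 * Usage sketch (hypothetical helper): draining a shared list into a
 * local one.  list_splice_init() leaves the source empty and
 * reinitialised, so a consumer can batch-process entries privately;
 * the caller supplies whatever locking protects @shared.
 */
static inline void demo_drain(struct list_head *shared,
			      struct list_head *local)
{
	INIT_LIST_HEAD(local);
	list_splice_init(shared, local);	/* @shared is now empty. */
}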
1064 | ||
1065 | /** | |
1066 | * list_entry - get the struct for this entry | |
1067 | * @ptr: the &struct list_head pointer. | |
1068 | * @type: the type of the struct this is embedded in. | |
1069 | * @member: the name of the list_struct within the struct. | |
1070 | */ | |
1071 | #define list_entry(ptr, type, member) \ | |
1072 | container_of(ptr, type, member) | |
1073 | ||
1074 | /** | |
1075 | * list_first_entry - get the first element from a list | |
1076 | * @ptr: the list head to take the element from. | |
1077 | * @type: the type of the struct this is embedded in. | |
1078 | * @member: the name of the list_struct within the struct. | |
1079 | * | |
1080 | * Note, that list is expected to be not empty. | |
1081 | */ | |
1082 | #define list_first_entry(ptr, type, member) \ | |
1083 | list_entry((ptr)->next, type, member) | |
1084 | ||
1085 | /** | |
1086 | * list_for_each - iterate over a list | |
1087 | * @pos: the &struct list_head to use as a loop cursor. | |
1088 | * @head: the head for your list. | |
1089 | */ | |
1090 | #define list_for_each(pos, head) \ | |
1091 | for (pos = (head)->next; prefetch(pos->next), pos != (head); \ | |
1092 | pos = pos->next) | |
1093 | ||
1094 | /** | |
1095 | * __list_for_each - iterate over a list | |
1096 | * @pos: the &struct list_head to use as a loop cursor. | |
1097 | * @head: the head for your list. | |
1098 | * | |
1099 | * This variant differs from list_for_each() in that it's the | |
1100 | * simplest possible list iteration code, no prefetching is done. | |
1101 | * Use this for code that knows the list to be very short (empty | |
1102 | * or 1 entry) most of the time. | |
1103 | */ | |
1104 | #define __list_for_each(pos, head) \ | |
1105 | for (pos = (head)->next; pos != (head); pos = pos->next) | |
1106 | ||
1107 | /** | |
1108 | * list_for_each_prev - iterate over a list backwards | |
1109 | * @pos: the &struct list_head to use as a loop cursor. | |
1110 | * @head: the head for your list. | |
1111 | */ | |
1112 | #define list_for_each_prev(pos, head) \ | |
1113 | for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ | |
1114 | pos = pos->prev) | |
1115 | ||
1116 | /** | |
1117 | * list_for_each_safe - iterate over a list safe against removal of list entry | |
1118 | * @pos: the &struct list_head to use as a loop cursor. | |
1119 | * @n: another &struct list_head to use as temporary storage | |
1120 | * @head: the head for your list. | |
1121 | */ | |
1122 | #define list_for_each_safe(pos, n, head) \ | |
1123 | for (pos = (head)->next, n = pos->next; pos != (head); \ | |
1124 | pos = n, n = pos->next) | |
1125 | ||
1126 | /** | |
1127 | * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry | |
1128 | * @pos: the &struct list_head to use as a loop cursor. | |
1129 | * @n: another &struct list_head to use as temporary storage | |
1130 | * @head: the head for your list. | |
1131 | */ | |
1132 | #define list_for_each_prev_safe(pos, n, head) \ | |
1133 | for (pos = (head)->prev, n = pos->prev; \ | |
1134 | prefetch(pos->prev), pos != (head); \ | |
1135 | pos = n, n = pos->prev) | |
1136 | ||
1137 | /** | |
1138 | * list_for_each_entry - iterate over list of given type | |
1139 | * @pos: the type * to use as a loop cursor. | |
1140 | * @head: the head for your list. | |
1141 | * @member: the name of the list_struct within the struct. | |
1142 | */ | |
1143 | #define list_for_each_entry(pos, head, member) \ | |
1144 | for (pos = list_entry((head)->next, typeof(*pos), member); \ | |
1145 | prefetch(pos->member.next), &pos->member != (head); \ | |
1146 | pos = list_entry(pos->member.next, typeof(*pos), member)) | |
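
/*
 * Usage sketch: embed a list_head in your own structure and walk the
 * list with list_for_each_entry() ("struct demo_node" is hypothetical).
 */
struct demo_node {
	int val;
	struct list_head link;
};

static inline int demo_list_sum(struct list_head *head)
{
	struct demo_node *p;
	int sum = 0;

	list_for_each_entry(p, head, link)
		sum += p->val;
	return sum;
}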
1147 | ||
1148 | /** | |
1149 | * list_for_each_entry_reverse - iterate backwards over list of given type. | |
1150 | * @pos: the type * to use as a loop cursor. | |
1151 | * @head: the head for your list. | |
1152 | * @member: the name of the list_struct within the struct. | |
1153 | */ | |
1154 | #define list_for_each_entry_reverse(pos, head, member) \ | |
1155 | for (pos = list_entry((head)->prev, typeof(*pos), member); \ | |
1156 | prefetch(pos->member.prev), &pos->member != (head); \ | |
1157 | pos = list_entry(pos->member.prev, typeof(*pos), member)) | |
1158 | ||
1159 | /** | |
1160 | * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() | |
1161 | * @pos: the type * to use as a start point | |
1162 | * @head: the head of the list | |
1163 | * @member: the name of the list_struct within the struct. | |
1164 | * | |
1165 | * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). | |
1166 | */ | |
1167 | #define list_prepare_entry(pos, head, member) \ | |
1168 | ((pos) ? : list_entry(head, typeof(*pos), member)) | |
1169 | ||
1170 | /** | |
1171 | * list_for_each_entry_continue - continue iteration over list of given type | |
1172 | * @pos: the type * to use as a loop cursor. | |
1173 | * @head: the head for your list. | |
1174 | * @member: the name of the list_struct within the struct. | |
1175 | * | |
1176 | * Continue to iterate over list of given type, continuing after | |
1177 | * the current position. | |
1178 | */ | |
1179 | #define list_for_each_entry_continue(pos, head, member) \ | |
1180 | for (pos = list_entry(pos->member.next, typeof(*pos), member); \ | |
1181 | prefetch(pos->member.next), &pos->member != (head); \ | |
1182 | pos = list_entry(pos->member.next, typeof(*pos), member)) | |
1183 | ||
1184 | /** | |
1185 | * list_for_each_entry_continue_reverse - iterate backwards from the given point | |
1186 | * @pos: the type * to use as a loop cursor. | |
1187 | * @head: the head for your list. | |
1188 | * @member: the name of the list_struct within the struct. | |
1189 | * | |
1190 | * Start to iterate over list of given type backwards, continuing after | |
1191 | * the current position. | |
1192 | */ | |
1193 | #define list_for_each_entry_continue_reverse(pos, head, member) \ | |
1194 | for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ | |
1195 | prefetch(pos->member.prev), &pos->member != (head); \ | |
1196 | pos = list_entry(pos->member.prev, typeof(*pos), member)) | |
1197 | ||
1198 | /** | |
1199 | * list_for_each_entry_from - iterate over list of given type from the current point | |
1200 | * @pos: the type * to use as a loop cursor. | |
1201 | * @head: the head for your list. | |
1202 | * @member: the name of the list_struct within the struct. | |
1203 | * | |
1204 | * Iterate over list of given type, continuing from current position. | |
1205 | */ | |
1206 | #define list_for_each_entry_from(pos, head, member) \ | |
1207 | for (; prefetch(pos->member.next), &pos->member != (head); \ | |
1208 | pos = list_entry(pos->member.next, typeof(*pos), member)) | |
1209 | ||
1210 | /** | |
1211 | * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry | |
1212 | * @pos: the type * to use as a loop cursor. | |
1213 | * @n: another type * to use as temporary storage | |
1214 | * @head: the head for your list. | |
1215 | * @member: the name of the list_struct within the struct. | |
1216 | */ | |
1217 | #define list_for_each_entry_safe(pos, n, head, member) \ | |
1218 | for (pos = list_entry((head)->next, typeof(*pos), member), \ | |
1219 | n = list_entry(pos->member.next, typeof(*pos), member); \ | |
1220 | &pos->member != (head); \ | |
1221 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) | |
1222 | ||
1223 | /** | |
1224 | * list_for_each_entry_safe_continue | |
1225 | * @pos: the type * to use as a loop cursor. | |
1226 | * @n: another type * to use as temporary storage | |
1227 | * @head: the head for your list. | |
1228 | * @member: the name of the list_struct within the struct. | |
1229 | * | |
1230 | * Iterate over list of given type, continuing after current point, | |
1231 | * safe against removal of list entry. | |
1232 | */ | |
1233 | #define list_for_each_entry_safe_continue(pos, n, head, member) \ | |
1234 | for (pos = list_entry(pos->member.next, typeof(*pos), member), \ | |
1235 | n = list_entry(pos->member.next, typeof(*pos), member); \ | |
1236 | &pos->member != (head); \ | |
1237 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) | |
1238 | ||
1239 | /** | |
1240 | * list_for_each_entry_safe_from | |
1241 | * @pos: the type * to use as a loop cursor. | |
1242 | * @n: another type * to use as temporary storage | |
1243 | * @head: the head for your list. | |
1244 | * @member: the name of the list_struct within the struct. | |
1245 | * | |
1246 | * Iterate over list of given type from current point, safe against | |
1247 | * removal of list entry. | |
1248 | */ | |
1249 | #define list_for_each_entry_safe_from(pos, n, head, member) \ | |
1250 | for (n = list_entry(pos->member.next, typeof(*pos), member); \ | |
1251 | &pos->member != (head); \ | |
1252 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) | |
1253 | ||
1254 | /** | |
1255 | * list_for_each_entry_safe_reverse | |
1256 | * @pos: the type * to use as a loop cursor. | |
1257 | * @n: another type * to use as temporary storage | |
1258 | * @head: the head for your list. | |
1259 | * @member: the name of the list_struct within the struct. | |
1260 | * | |
1261 | * Iterate backwards over list of given type, safe against removal | |
1262 | * of list entry. | |
1263 | */ | |
1264 | #define list_for_each_entry_safe_reverse(pos, n, head, member) \ | |
1265 | for (pos = list_entry((head)->prev, typeof(*pos), member), \ | |
1266 | n = list_entry(pos->member.prev, typeof(*pos), member); \ | |
1267 | &pos->member != (head); \ | |
1268 | pos = n, n = list_entry(n->member.prev, typeof(*n), member)) | |
1269 | ||
1270 | /* | |
1271 | * Double linked lists with a single pointer list head. | |
1272 | * Mostly useful for hash tables where the two pointer list head is | |
1273 | * too wasteful. | |
1274 | * You lose the ability to access the tail in O(1). | |
1275 | */ | |
1276 | ||
1277 | struct hlist_head { | |
1278 | struct hlist_node *first; | |
1279 | }; | |
1280 | ||
1281 | struct hlist_node { | |
1282 | struct hlist_node *next, **pprev; | |
1283 | }; | |
1284 | ||
1285 | #define HLIST_HEAD_INIT { .first = NULL } | |
1286 | #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } | |
1287 | #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) | |
1288 | static inline void INIT_HLIST_NODE(struct hlist_node *h) | |
1289 | { | |
1290 | h->next = NULL; | |
1291 | h->pprev = NULL; | |
1292 | } | |
1293 | ||
1294 | static inline int hlist_unhashed(const struct hlist_node *h) | |
1295 | { | |
1296 | return !h->pprev; | |
1297 | } | |
1298 | ||
1299 | static inline int hlist_empty(const struct hlist_head *h) | |
1300 | { | |
1301 | return !h->first; | |
1302 | } | |
1303 | ||
1304 | static inline void __hlist_del(struct hlist_node *n) | |
1305 | { | |
1306 | struct hlist_node *next = n->next; | |
1307 | struct hlist_node **pprev = n->pprev; | |
1308 | *pprev = next; | |
1309 | if (next) | |
1310 | next->pprev = pprev; | |
1311 | } | |
1312 | ||
1313 | static inline void hlist_del(struct hlist_node *n) | |
1314 | { | |
1315 | __hlist_del(n); | |
1316 | n->next = LIST_POISON1; | |
1317 | n->pprev = LIST_POISON2; | |
1318 | } | |
1319 | ||
1320 | static inline void hlist_del_init(struct hlist_node *n) | |
1321 | { | |
1322 | if (!hlist_unhashed(n)) { | |
1323 | __hlist_del(n); | |
1324 | INIT_HLIST_NODE(n); | |
1325 | } | |
1326 | } | |
1327 | ||
1328 | static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) | |
1329 | { | |
1330 | struct hlist_node *first = h->first; | |
1331 | n->next = first; | |
1332 | if (first) | |
1333 | first->pprev = &n->next; | |
1334 | h->first = n; | |
1335 | n->pprev = &h->first; | |
1336 | } | |
1337 | ||
1338 | /* next must be != NULL */ | |
1339 | static inline void hlist_add_before(struct hlist_node *n, | |
1340 | struct hlist_node *next) | |
1341 | { | |
1342 | n->pprev = next->pprev; | |
1343 | n->next = next; | |
1344 | next->pprev = &n->next; | |
1345 | *(n->pprev) = n; | |
1346 | } | |
1347 | ||
1348 | static inline void hlist_add_after(struct hlist_node *n, | |
1349 | struct hlist_node *next) | |
1350 | { | |
1351 | next->next = n->next; | |
1352 | n->next = next; | |
1353 | next->pprev = &n->next; | |
1354 | ||
1355 | if(next->next) | |
1356 | next->next->pprev = &next->next; | |
1357 | } | |
1358 | ||
1359 | /* | |
1360 | * Move a list from one list head to another. Fixup the pprev | |
1361 | * reference of the first entry if it exists. | |
1362 | */ | |
1363 | static inline void hlist_move_list(struct hlist_head *old, | |
1364 | struct hlist_head *new) | |
1365 | { | |
1366 | new->first = old->first; | |
1367 | if (new->first) | |
1368 | new->first->pprev = &new->first; | |
1369 | old->first = NULL; | |
1370 | } | |
1371 | ||
1372 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) | |
1373 | ||
1374 | #define hlist_for_each(pos, head) \ | |
1375 | for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ | |
1376 | pos = pos->next) | |
1377 | ||
1378 | #define hlist_for_each_safe(pos, n, head) \ | |
1379 | for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ | |
1380 | pos = n) | |
1381 | ||
1382 | /** | |
1383 | * hlist_for_each_entry - iterate over list of given type | |
1384 | * @tpos: the type * to use as a loop cursor. | |
1385 | * @pos: the &struct hlist_node to use as a loop cursor. | |
1386 | * @head: the head for your list. | |
1387 | * @member: the name of the hlist_node within the struct. | |
1388 | */ | |
1389 | #define hlist_for_each_entry(tpos, pos, head, member) \ | |
1390 | for (pos = (head)->first; \ | |
1391 | pos && ({ prefetch(pos->next); 1;}) && \ | |
1392 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | |
1393 | pos = pos->next) | |
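
/*
 * Usage sketch: a toy hash-bucket lookup ("struct demo_hnode" is
 * hypothetical).  Note the separate &struct hlist_node cursor that this
 * older iteration style requires alongside the typed cursor.
 */
struct demo_hnode {
	int key;
	struct hlist_node link;
};

static inline struct demo_hnode *demo_hash_find(struct hlist_head *bucket,
						int key)
{
	struct demo_hnode *t;
	struct hlist_node *pos;

	hlist_for_each_entry(t, pos, bucket, link)
		if (t->key == key)
			return t;
	return NULL;
}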
1394 | ||
1395 | /** | |
1396 | * hlist_for_each_entry_continue - iterate over a hlist continuing after current point | |
1397 | * @tpos: the type * to use as a loop cursor. | |
1398 | * @pos: the &struct hlist_node to use as a loop cursor. | |
1399 | * @member: the name of the hlist_node within the struct. | |
1400 | */ | |
1401 | #define hlist_for_each_entry_continue(tpos, pos, member) \ | |
1402 | for (pos = (pos)->next; \ | |
1403 | pos && ({ prefetch(pos->next); 1;}) && \ | |
1404 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | |
1405 | pos = pos->next) | |
1406 | ||
1407 | /** | |
1408 | * hlist_for_each_entry_from - iterate over a hlist continuing from current point | |
1409 | * @tpos: the type * to use as a loop cursor. | |
1410 | * @pos: the &struct hlist_node to use as a loop cursor. | |
1411 | * @member: the name of the hlist_node within the struct. | |
1412 | */ | |
1413 | #define hlist_for_each_entry_from(tpos, pos, member) \ | |
1414 | for (; pos && ({ prefetch(pos->next); 1;}) && \ | |
1415 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | |
1416 | pos = pos->next) | |
1417 | ||
1418 | /** | |
1419 | * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry | |
1420 | * @tpos: the type * to use as a loop cursor. | |
1421 | * @pos: the &struct hlist_node to use as a loop cursor. | |
1422 | * @n: another &struct hlist_node to use as temporary storage | |
1423 | * @head: the head for your list. | |
1424 | * @member: the name of the hlist_node within the struct. | |
1425 | */ | |
1426 | #define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ | |
1427 | for (pos = (head)->first; \ | |
1428 | pos && ({ n = pos->next; 1; }) && \ | |
1429 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | |
1430 | pos = n) | |
1431 | ||
1432 | #endif |