1 | /* MECHANICALLY GENERATED, DO NOT EDIT!!! */ |
2 | ||
3 | #define _INCLUDE_API_H | |
4 | ||
5 | /* | |
6 | * common.h: Common Linux kernel-isms. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License as published by | |
10 | * the Free Software Foundation; but version 2 of the License only due | |
11 | * to code included from the Linux kernel. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software | |
20 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
21 | * | |
22 | * Copyright (c) 2006 Paul E. McKenney, IBM. | |
23 | * | |
24 | * Much code taken from the Linux kernel. For such code, the option | |
25 | * to redistribute under later versions of GPL might not be available. | |
26 | */ | |
27 | ||
28 | #ifndef __always_inline | |
29 | #define __always_inline inline | |
30 | #endif | |
31 | ||
32 | #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) | |
33 | #define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) | |
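/*
 * Editorial example (not in the original file): BUILD_BUG_ON() turns a
 * true condition into a negative-sized array, so the check fails at
 * compile time rather than at run time. The struct here is hypothetical.
 *
 *	struct ctrblk { long sum; long cnt; };
 *
 *	static inline void ctrblk_check(void)
 *	{
 *		BUILD_BUG_ON(sizeof(struct ctrblk) != 2 * sizeof(long));
 *	}
 */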
34 | ||
35 | #ifdef __ASSEMBLY__ | |
36 | # define stringify_in_c(...) __VA_ARGS__ | |
37 | # define ASM_CONST(x) x | |
38 | #else | |
39 | /* This version of stringify will deal with commas... */ | |
40 | # define __stringify_in_c(...) #__VA_ARGS__ | |
41 | # define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " " | |
42 | # define __ASM_CONST(x) x##UL | |
43 | # define ASM_CONST(x) __ASM_CONST(x) | |
44 | #endif | |
45 | ||
46 | ||
47 | /* | |
48 | * arch-ppc64.h: Expose PowerPC atomic instructions. | |
49 | * | |
50 | * This program is free software; you can redistribute it and/or modify | |
51 | * it under the terms of the GNU General Public License as published by | |
52 | * the Free Software Foundation; but version 2 of the License only due | |
53 | * to code included from the Linux kernel. | |
54 | * | |
55 | * This program is distributed in the hope that it will be useful, | |
56 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
57 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
58 | * GNU General Public License for more details. | |
59 | * | |
60 | * You should have received a copy of the GNU General Public License | |
61 | * along with this program; if not, write to the Free Software | |
62 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
63 | * | |
64 | * Copyright (c) 2006 Paul E. McKenney, IBM. | |
65 | * | |
66 | * Much code taken from the Linux kernel. For such code, the option | |
67 | * to redistribute under later versions of GPL might not be available. | |
68 | */ | |
69 | ||
70 | /* | |
71 | * Machine parameters. | |
72 | */ | |
73 | ||
74 | #define CONFIG_PPC64 |
75 | ||
76 | #define CACHE_LINE_SIZE 128 | |
77 | #define ____cacheline_internodealigned_in_smp \ | |
78 | __attribute__((__aligned__(1 << 7))) | |
79 | ||
80 | /* | |
81 | * Atomic data structure, initialization, and access. | |
82 | */ | |
83 | ||
84 | typedef struct { volatile int counter; } atomic_t; | |
85 | ||
86 | #define ATOMIC_INIT(i) { (i) } | |
87 | ||
88 | #define atomic_read(v) ((v)->counter) | |
89 | #define atomic_set(v, i) (((v)->counter) = (i)) | |
90 | ||
91 | /* | |
92 | * Atomic operations. | |
93 | */ | |
94 | ||
95 | #define LWSYNC lwsync | |
96 | #define PPC405_ERR77(ra,rb) | |
97 | #ifdef CONFIG_SMP | |
98 | # define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n" | |
99 | # define ISYNC_ON_SMP "\n\tisync\n" | |
100 | #else | |
101 | # define LWSYNC_ON_SMP | |
102 | # define ISYNC_ON_SMP | |
103 | #endif | |
104 | ||
105 | ||
106 | /* | |
107 | * Atomic exchange | |
108 | * | |
109 | * Changes the memory location '*ptr' to be val and returns | |
110 | * the previous value stored there. | |
111 | */ | |
112 | static __always_inline unsigned long | |
113 | __xchg_u32(volatile void *p, unsigned long val) | |
114 | { | |
115 | unsigned long prev; | |
116 | ||
117 | __asm__ __volatile__( | |
118 | LWSYNC_ON_SMP | |
119 | "1: lwarx %0,0,%2 \n" | |
120 | PPC405_ERR77(0,%2) | |
121 | " stwcx. %3,0,%2 \n\ | |
122 | bne- 1b" | |
123 | ISYNC_ON_SMP | |
124 | : "=&r" (prev), "+m" (*(volatile unsigned int *)p) | |
125 | : "r" (p), "r" (val) | |
126 | : "cc", "memory"); | |
127 | ||
128 | return prev; | |
129 | } | |
130 | ||
131 | /* | |
132 | * Atomic exchange | |
133 | * | |
134 | * Changes the memory location '*ptr' to be val and returns | |
135 | * the previous value stored there. | |
136 | */ | |
137 | static __always_inline unsigned long | |
138 | __xchg_u32_local(volatile void *p, unsigned long val) | |
139 | { | |
140 | unsigned long prev; | |
141 | ||
142 | __asm__ __volatile__( | |
143 | "1: lwarx %0,0,%2 \n" | |
144 | PPC405_ERR77(0,%2) | |
145 | " stwcx. %3,0,%2 \n\ | |
146 | bne- 1b" | |
147 | : "=&r" (prev), "+m" (*(volatile unsigned int *)p) | |
148 | : "r" (p), "r" (val) | |
149 | : "cc", "memory"); | |
150 | ||
151 | return prev; | |
152 | } | |
153 | ||
154 | #ifdef CONFIG_PPC64 | |
155 | static __always_inline unsigned long | |
156 | __xchg_u64(volatile void *p, unsigned long val) | |
157 | { | |
158 | unsigned long prev; | |
159 | ||
160 | __asm__ __volatile__( | |
161 | LWSYNC_ON_SMP | |
162 | "1: ldarx %0,0,%2 \n" | |
163 | PPC405_ERR77(0,%2) | |
164 | " stdcx. %3,0,%2 \n\ | |
165 | bne- 1b" | |
166 | ISYNC_ON_SMP | |
167 | : "=&r" (prev), "+m" (*(volatile unsigned long *)p) | |
168 | : "r" (p), "r" (val) | |
169 | : "cc", "memory"); | |
170 | ||
171 | return prev; | |
172 | } | |
173 | ||
174 | static __always_inline unsigned long | |
175 | __xchg_u64_local(volatile void *p, unsigned long val) | |
176 | { | |
177 | unsigned long prev; | |
178 | ||
179 | __asm__ __volatile__( | |
180 | "1: ldarx %0,0,%2 \n" | |
181 | PPC405_ERR77(0,%2) | |
182 | " stdcx. %3,0,%2 \n\ | |
183 | bne- 1b" | |
184 | : "=&r" (prev), "+m" (*(volatile unsigned long *)p) | |
185 | : "r" (p), "r" (val) | |
186 | : "cc", "memory"); | |
187 | ||
188 | return prev; | |
189 | } | |
190 | #endif | |
191 | ||
192 | /* | |
193 | * This function doesn't exist, so you'll get a linker error | |
194 | * if something tries to do an invalid xchg(). | |
195 | */ | |
196 | extern void __xchg_called_with_bad_pointer(void); | |
197 | ||
198 | static __always_inline unsigned long | |
199 | __xchg(volatile void *ptr, unsigned long x, unsigned int size) | |
200 | { | |
201 | switch (size) { | |
202 | case 4: | |
203 | return __xchg_u32(ptr, x); | |
204 | #ifdef CONFIG_PPC64 | |
205 | case 8: | |
206 | return __xchg_u64(ptr, x); | |
207 | #endif | |
208 | } | |
209 | __xchg_called_with_bad_pointer(); | |
210 | return x; | |
211 | } | |
212 | ||
213 | static __always_inline unsigned long | |
214 | __xchg_local(volatile void *ptr, unsigned long x, unsigned int size) | |
215 | { | |
216 | switch (size) { | |
217 | case 4: | |
218 | return __xchg_u32_local(ptr, x); | |
219 | #ifdef CONFIG_PPC64 | |
220 | case 8: | |
221 | return __xchg_u64_local(ptr, x); | |
222 | #endif | |
223 | } | |
224 | __xchg_called_with_bad_pointer(); | |
225 | return x; | |
226 | } | |
227 | #define xchg(ptr,x) \ | |
228 | ({ \ | |
229 | __typeof__(*(ptr)) _x_ = (x); \ | |
230 | (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ | |
231 | }) | |
232 | ||
233 | #define xchg_local(ptr,x) \ | |
234 | ({ \ | |
235 | __typeof__(*(ptr)) _x_ = (x); \ | |
236 | (__typeof__(*(ptr))) __xchg_local((ptr), \ | |
237 | (unsigned long)_x_, sizeof(*(ptr))); \ | |
238 | }) | |
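/*
 * Editorial usage sketch: xchg() atomically installs a new value and
 * returns the previous one, which is exactly a test-and-set lock. The
 * lock_word variable and the function names are hypothetical.
 *
 *	int lock_word;				(0 = free, 1 = held)
 *
 *	static void ts_lock(void)
 *	{
 *		while (xchg(&lock_word, 1) != 0)
 *			continue;		(spin until old value was 0)
 *	}
 *
 *	static void ts_unlock(void)
 *	{
 *		(void)xchg(&lock_word, 0);
 *	}
 */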
239 | ||
240 | /* | |
241 | * Compare and exchange - if *p == old, set it to new, | |
242 | * and return the old value of *p. | |
243 | */ | |
244 | #define __HAVE_ARCH_CMPXCHG 1 | |
245 | ||
246 | static __always_inline unsigned long | |
247 | __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) | |
248 | { | |
249 | unsigned int prev; | |
250 | ||
251 | __asm__ __volatile__ ( | |
252 | LWSYNC_ON_SMP | |
253 | "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ | |
254 | cmpw 0,%0,%3\n\ | |
255 | bne- 2f\n" | |
256 | PPC405_ERR77(0,%2) | |
257 | " stwcx. %4,0,%2\n\ | |
258 | bne- 1b" | |
259 | ISYNC_ON_SMP | |
260 | "\n\ | |
261 | 2:" | |
262 | : "=&r" (prev), "+m" (*p) | |
263 | : "r" (p), "r" (old), "r" (new) | |
264 | : "cc", "memory"); | |
265 | ||
266 | return prev; | |
267 | } | |
268 | ||
269 | static __always_inline unsigned long | |
270 | __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old, | |
271 | unsigned long new) | |
272 | { | |
273 | unsigned int prev; | |
274 | ||
275 | __asm__ __volatile__ ( | |
276 | "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ | |
277 | cmpw 0,%0,%3\n\ | |
278 | bne- 2f\n" | |
279 | PPC405_ERR77(0,%2) | |
280 | " stwcx. %4,0,%2\n\ | |
281 | bne- 1b" | |
282 | "\n\ | |
283 | 2:" | |
284 | : "=&r" (prev), "+m" (*p) | |
285 | : "r" (p), "r" (old), "r" (new) | |
286 | : "cc", "memory"); | |
287 | ||
288 | return prev; | |
289 | } | |
290 | ||
291 | #ifdef CONFIG_PPC64 | |
292 | static __always_inline unsigned long | |
293 | __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) | |
294 | { | |
295 | unsigned long prev; | |
296 | ||
297 | __asm__ __volatile__ ( | |
298 | LWSYNC_ON_SMP | |
299 | "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ | |
300 | cmpd 0,%0,%3\n\ | |
301 | bne- 2f\n\ | |
302 | stdcx. %4,0,%2\n\ | |
303 | bne- 1b" | |
304 | ISYNC_ON_SMP | |
305 | "\n\ | |
306 | 2:" | |
307 | : "=&r" (prev), "+m" (*p) | |
308 | : "r" (p), "r" (old), "r" (new) | |
309 | : "cc", "memory"); | |
310 | ||
311 | return prev; | |
312 | } | |
313 | ||
314 | static __always_inline unsigned long | |
315 | __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old, | |
316 | unsigned long new) | |
317 | { | |
318 | unsigned long prev; | |
319 | ||
320 | __asm__ __volatile__ ( | |
321 | "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ | |
322 | cmpd 0,%0,%3\n\ | |
323 | bne- 2f\n\ | |
324 | stdcx. %4,0,%2\n\ | |
325 | bne- 1b" | |
326 | "\n\ | |
327 | 2:" | |
328 | : "=&r" (prev), "+m" (*p) | |
329 | : "r" (p), "r" (old), "r" (new) | |
330 | : "cc", "memory"); | |
331 | ||
332 | return prev; | |
333 | } | |
334 | #endif | |
335 | ||
336 | /* This function doesn't exist, so you'll get a linker error | |
337 | if something tries to do an invalid cmpxchg(). */ | |
338 | extern void __cmpxchg_called_with_bad_pointer(void); | |
339 | ||
340 | static __always_inline unsigned long | |
341 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, | |
342 | unsigned int size) | |
343 | { | |
344 | switch (size) { | |
345 | case 4: | |
346 | return __cmpxchg_u32(ptr, old, new); | |
347 | #ifdef CONFIG_PPC64 | |
348 | case 8: | |
349 | return __cmpxchg_u64(ptr, old, new); | |
350 | #endif | |
351 | } | |
352 | __cmpxchg_called_with_bad_pointer(); | |
353 | return old; | |
354 | } | |
355 | ||
356 | static __always_inline unsigned long | |
357 | __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, | |
358 | unsigned int size) | |
359 | { | |
360 | switch (size) { | |
361 | case 4: | |
362 | return __cmpxchg_u32_local(ptr, old, new); | |
363 | #ifdef CONFIG_PPC64 | |
364 | case 8: | |
365 | return __cmpxchg_u64_local(ptr, old, new); | |
366 | #endif | |
367 | } | |
368 | __cmpxchg_called_with_bad_pointer(); | |
369 | return old; | |
370 | } | |
371 | ||
372 | #define cmpxchg(ptr, o, n) \ | |
373 | ({ \ | |
374 | __typeof__(*(ptr)) _o_ = (o); \ | |
375 | __typeof__(*(ptr)) _n_ = (n); \ | |
376 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | |
377 | (unsigned long)_n_, sizeof(*(ptr))); \ | |
378 | }) | |
379 | ||
380 | ||
381 | #define cmpxchg_local(ptr, o, n) \ | |
382 | ({ \ | |
383 | __typeof__(*(ptr)) _o_ = (o); \ | |
384 | __typeof__(*(ptr)) _n_ = (n); \ | |
385 | (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ | |
386 | (unsigned long)_n_, sizeof(*(ptr))); \ | |
387 | }) | |
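/*
 * Editorial usage sketch: the canonical cmpxchg() retry loop -- re-read
 * the old value and retry until no other thread slipped an update in
 * between. 'ctr' and ctr_add() are hypothetical names.
 *
 *	unsigned long ctr;
 *
 *	static void ctr_add(unsigned long delta)
 *	{
 *		unsigned long old;
 *
 *		do {
 *			old = ctr;
 *		} while (cmpxchg(&ctr, old, old + delta) != old);
 *	}
 */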
388 | ||
389 | #ifdef CONFIG_PPC64 | |
390 | /* | |
391 | * We handle most unaligned accesses in hardware. On the other hand | |
392 | * unaligned DMA can be very expensive on some ppc64 IO chips (it does | |
393 | * powers of 2 writes until it reaches sufficient alignment). | |
394 | * | |
395 | * Based on this we disable the IP header alignment in network drivers. | |
396 | * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining | |
397 | * cacheline alignment of buffers. | |
398 | */ | |
399 | #define NET_IP_ALIGN 0 | |
400 | #define NET_SKB_PAD L1_CACHE_BYTES | |
401 | ||
402 | #define cmpxchg64(ptr, o, n) \ | |
403 | ({ \ | |
404 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | |
405 | cmpxchg((ptr), (o), (n)); \ | |
406 | }) | |
407 | #define cmpxchg64_local(ptr, o, n) \ | |
408 | ({ \ | |
409 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | |
410 | cmpxchg_local((ptr), (o), (n)); \ | |
411 | }) | |
412 | #endif | |
413 | ||
414 | #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) | |
415 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | |
416 | ||
417 | /** | |
418 | * atomic_add - add integer to atomic variable | |
419 | * @a: integer value to add |
420 | * @v: pointer of type atomic_t | |
421 | * | |
422 | * Atomically adds @a to @v. | |
423 | */ | |
424 | static __inline__ void atomic_add(int a, atomic_t *v) | |
425 | { | |
426 | int t; | |
427 | ||
428 | __asm__ __volatile__( | |
429 | "1: lwarx %0,0,%3 # atomic_add\n\ | |
430 | add %0,%2,%0 \n\ | |
431 | stwcx. %0,0,%3 \n\ | |
432 | bne- 1b" | |
433 | : "=&r" (t), "+m" (v->counter) | |
434 | : "r" (a), "r" (&v->counter) | |
435 | : "cc"); | |
436 | } | |
437 | ||
438 | /** | |
439 | * atomic_sub - subtract integer from atomic variable |
440 | * @a: integer value to subtract |
441 | * @v: pointer of type atomic_t | |
442 | * | |
443 | * Atomically subtracts @a from @v. | |
444 | */ | |
445 | static __inline__ void atomic_sub(int a, atomic_t *v) | |
446 | { | |
447 | int t; | |
448 | ||
449 | __asm__ __volatile__( | |
450 | "1: lwarx %0,0,%3 # atomic_sub \n\ | |
451 | subf %0,%2,%0 \n\ | |
452 | stwcx. %0,0,%3 \n\ | |
453 | bne- 1b" | |
454 | : "=&r" (t), "+m" (v->counter) | |
455 | : "r" (a), "r" (&v->counter) | |
456 | : "cc"); | |
457 | } | |
458 | ||
459 | static __inline__ int atomic_sub_return(int a, atomic_t *v) |
460 | { | |
461 | int t; | |
462 | ||
463 | __asm__ __volatile__( | |
464 | "lwsync\n\ | |
465 | 1: lwarx %0,0,%2 # atomic_sub_return\n\ | |
466 | subf %0,%1,%0\n\ | |
467 | stwcx. %0,0,%2 \n\ | |
468 | bne- 1b \n\ | |
469 | isync" | |
470 | : "=&r" (t) | |
471 | : "r" (a), "r" (&v->counter) | |
472 | : "cc", "memory"); | |
473 | ||
474 | return t; | |
475 | } | |
476 | ||
477 | /** | |
478 | * atomic_sub_and_test - subtract value from variable and test result | |
479 | * @a: integer value to subtract |
480 | * @v: pointer of type atomic_t |
481 | * |
482 | * Atomically subtracts @a from @v and returns |
483 | * true if the result is zero, or false for all | |
484 | * other cases. | |
485 | */ | |
486 | static __inline__ int atomic_sub_and_test(int a, atomic_t *v) | |
487 | { | |
488 | return atomic_sub_return(a, v) == 0; | |
489 | } | |
490 | ||
491 | /** | |
492 | * atomic_inc - increment atomic variable | |
493 | * @v: pointer of type atomic_t | |
494 | * | |
495 | * Atomically increments @v by 1. | |
496 | */ | |
497 | static __inline__ void atomic_inc(atomic_t *v) | |
498 | { | |
499 | atomic_add(1, v); | |
500 | } | |
501 | ||
502 | /** | |
503 | * atomic_dec - decrement atomic variable | |
504 | * @v: pointer of type atomic_t | |
505 | * | |
506 | * Atomically decrements @v by 1. | |
507 | */ | |
508 | static __inline__ void atomic_dec(atomic_t *v) | |
509 | { | |
510 | atomic_sub(1, v); | |
511 | } | |
512 | ||
513 | /** | |
514 | * atomic_dec_and_test - decrement and test | |
515 | * @v: pointer of type atomic_t | |
516 | * | |
517 | * Atomically decrements @v by 1 and | |
518 | * returns true if the result is 0, or false for all other | |
519 | * cases. | |
520 | */ | |
521 | static __inline__ int atomic_dec_and_test(atomic_t *v) | |
522 | { | |
523 | return atomic_sub_and_test(1, v); | |
524 | } | |
525 | ||
526 | /** | |
527 | * atomic_inc_and_test - increment and test | |
528 | * @v: pointer of type atomic_t | |
529 | * | |
530 | * Atomically increments @v by 1 | |
531 | * and returns true if the result is zero, or false for all | |
532 | * other cases. | |
533 | */ | |
534 | static __inline__ int atomic_inc_and_test(atomic_t *v) | |
535 | { | |
536 | return atomic_inc_return(v) == 0; |
537 | } | |
538 | ||
539 | /** | |
540 | * atomic_add_return - add and return | |
541 | * @v: pointer of type atomic_t | |
542 | * @a: integer value to add |
543 | * |
544 | * Atomically adds @a to @v and returns @a + @v |
545 | */ | |
546 | static __inline__ int atomic_add_return(int a, atomic_t *v) | |
547 | { | |
548 | int t; | |
549 | ||
550 | __asm__ __volatile__( | |
551 | "lwsync \n\ | |
552 | 1: lwarx %0,0,%2 # atomic_add_return \n\ | |
553 | add %0,%1,%0 \n\ | |
554 | stwcx. %0,0,%2 \n\ | |
555 | bne- 1b \n\ | |
556 | isync" | |
557 | : "=&r" (t) | |
558 | : "r" (a), "r" (&v->counter) | |
559 | : "cc", "memory"); | |
560 | ||
561 | return t; | |
562 | } | |
563 | ||
564 | /** | |
565 | * atomic_add_negative - add and test if negative | |
566 | * @v: pointer of type atomic_t | |
567 | * @a: integer value to add |
568 | * |
569 | * Atomically adds @a to @v and returns true |
570 | * if the result is negative, or false when | |
571 | * result is greater than or equal to zero. | |
572 | */ | |
573 | static __inline__ int atomic_add_negative(int a, atomic_t *v) | |
574 | { | |
575 | return atomic_add_return(a, v) < 0; | |
576 | } | |
577 | ||
578 | /** | |
579 | * atomic_add_unless - add unless the number is a given value | |
580 | * @v: pointer of type atomic_t | |
581 | * @a: the amount to add to v... | |
582 | * @u: ...unless v is equal to u. | |
583 | * | |
584 | * Atomically adds @a to @v, so long as it was not @u. | |
585 | * Returns non-zero if @v was not @u, and zero otherwise. | |
586 | */ | |
587 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |
588 | { | |
589 | int t; | |
590 | ||
591 | __asm__ __volatile__( | |
592 | "lwsync \n\ | |
593 | 1: lwarx %0,0,%1 # atomic_add_unless\n\ | |
594 | cmpw 0,%0,%3 \n\ |
595 | beq- 2f \n\ | |
596 | add %0,%2,%0 \n\ | |
597 | stwcx. %0,0,%1 \n\ | |
598 | bne- 1b \n\ | |
599 | isync \n\ | |
600 | subf %0,%2,%0 \n\ | |
601 | 2:" | |
602 | : "=&r" (t) | |
603 | : "r" (&v->counter), "r" (a), "r" (u) | |
604 | : "cc", "memory"); | |
605 | ||
606 | return t != u; | |
607 | } | |
608 | ||
609 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | |
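/*
 * Editorial usage sketch: atomic_inc_not_zero() acquires a reference only
 * while the object is still live; a zero count means someone is already
 * freeing it. free_object() is hypothetical.
 *
 *	atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	static int get_ref(void)
 *	{
 *		return atomic_inc_not_zero(&refcnt);	(0 means too late)
 *	}
 *
 *	static void put_ref(void)
 *	{
 *		if (atomic_dec_and_test(&refcnt))
 *			free_object();
 *	}
 */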
610 | ||
611 | #define atomic_inc_return(v) (atomic_add_return(1,v)) | |
612 | #define atomic_dec_return(v) (atomic_sub_return(1,v)) | |
613 | ||
614 | /* Atomic operations are not fully serializing on PowerPC, so these map to real barriers. */ |
 | #ifndef smp_mb /* normally supplied elsewhere in the full api.h; guarded fallback (assumption) */ |
 | #define smp_mb() __asm__ __volatile__("sync" : : : "memory") |
 | #endif |
615 | #define smp_mb__before_atomic_dec() smp_mb() | |
616 | #define smp_mb__after_atomic_dec() smp_mb() | |
617 | #define smp_mb__before_atomic_inc() smp_mb() | |
618 | #define smp_mb__after_atomic_inc() smp_mb() | |
619 | ||
620 | /* |
621 | * api_pthreads.h: API mapping to pthreads environment. | |
622 | * | |
623 | * This program is free software; you can redistribute it and/or modify | |
624 | * it under the terms of the GNU General Public License as published by | |
625 | * the Free Software Foundation; either version 2 of the License, or | |
626 | * (at your option) any later version. However, please note that much | |
627 | * of the code in this file derives from the Linux kernel, and that such | |
628 | * code may not be available except under GPLv2. | |
629 | * | |
630 | * This program is distributed in the hope that it will be useful, | |
631 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
632 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
633 | * GNU General Public License for more details. | |
634 | * | |
635 | * You should have received a copy of the GNU General Public License | |
636 | * along with this program; if not, write to the Free Software | |
637 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
638 | * | |
639 | * Copyright (c) 2006 Paul E. McKenney, IBM. | |
640 | */ | |
641 | ||
642 | #include <stdio.h> | |
643 | #include <stdlib.h> | |
644 | #include <errno.h> | |
645 | #include <limits.h> | |
646 | #include <sys/types.h> |
 | #include <sys/time.h> /* gettimeofday(), used by get_microseconds() below */ |
647 | #define __USE_GNU | |
648 | #include <pthread.h> | |
649 | #include <sched.h> | |
650 | #include <sys/param.h> | |
651 | /* #include "atomic.h" */ | |
652 | ||
653 | /* | |
654 | * Compiler magic. | |
655 | */ | |
656 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) | |
657 | #define container_of(ptr, type, member) ({ \ | |
658 | const typeof( ((type *)0)->member ) *__mptr = (ptr); \ | |
659 | (type *)( (char *)__mptr - offsetof(type,member) );}) | |
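/*
 * Editorial example: container_of() recovers the enclosing structure from
 * a pointer to one of its members. struct item is hypothetical (and uses
 * the struct list_head defined later in this file).
 *
 *	struct item {
 *		int key;
 *		struct list_head link;
 *	};
 *
 *	static struct item *item_of(struct list_head *p)
 *	{
 *		return container_of(p, struct item, link);
 *	}
 */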
660 | |
661 | /* | |
662 | * Default machine parameters. | |
663 | */ | |
664 | ||
665 | #ifndef CACHE_LINE_SIZE | |
666 | #define CACHE_LINE_SIZE 128 | |
667 | #endif /* #ifndef CACHE_LINE_SIZE */ | |
668 | ||
669 | /* | |
670 | * Exclusive locking primitives. | |
671 | */ | |
672 | ||
673 | typedef pthread_mutex_t spinlock_t; | |
674 | ||
675 | #define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER; | |
676 | #define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER | |
677 | ||
678 | static void spin_lock_init(spinlock_t *sp) | |
679 | { | |
680 | if (pthread_mutex_init(sp, NULL) != 0) { | |
681 | perror("spin_lock_init:pthread_mutex_init"); | |
682 | exit(-1); | |
683 | } | |
684 | } | |
685 | ||
686 | static void spin_lock(spinlock_t *sp) | |
687 | { | |
688 | if (pthread_mutex_lock(sp) != 0) { | |
689 | perror("spin_lock:pthread_mutex_lock"); | |
690 | exit(-1); | |
691 | } | |
692 | } | |
693 | ||
694 | static void spin_unlock(spinlock_t *sp) |
695 | { | |
696 | if (pthread_mutex_unlock(sp) != 0) { | |
697 | perror("spin_unlock:pthread_mutex_unlock"); | |
698 | exit(-1); | |
699 | } | |
700 | } | |
701 | ||
702 | #define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0) | |
703 | #define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0) | |
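/*
 * Editorial usage sketch: in this pthreads mapping a spinlock_t is just a
 * pthread mutex, and the _irqsave/_irqrestore forms merely set a dummy
 * flag, since there are no interrupts to disable. mylock and
 * do_protected_work() are hypothetical.
 *
 *	DEFINE_SPINLOCK(mylock);
 *
 *	static void update(void)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&mylock, flags);
 *		do_protected_work();
 *		spin_unlock_irqrestore(&mylock, flags);
 *	}
 */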
704 | ||
705 | /* |
706 | * Thread creation/destruction primitives. | |
707 | */ | |
708 | ||
709 | typedef pthread_t thread_id_t; | |
710 | ||
711 | #define NR_THREADS 128 | |
712 | ||
713 | #define __THREAD_ID_MAP_EMPTY 0 | |
714 | #define __THREAD_ID_MAP_WAITING 1 | |
715 | thread_id_t __thread_id_map[NR_THREADS]; | |
716 | spinlock_t __thread_id_map_mutex; | |
717 | ||
718 | #define for_each_thread(t) \ | |
719 | for (t = 0; t < NR_THREADS; t++) | |
720 | ||
721 | #define for_each_running_thread(t) \ | |
722 | for (t = 0; t < NR_THREADS; t++) \ | |
723 | if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \ | |
724 | (__thread_id_map[t] != __THREAD_ID_MAP_WAITING)) | |
725 | ||
726 | #define for_each_tid(t, tid) \ | |
727 | for (t = 0; t < NR_THREADS; t++) \ | |
728 | if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \ | |
729 | ((tid) != __THREAD_ID_MAP_WAITING)) | |
730 | ||
731 | pthread_key_t thread_id_key; | |
732 | ||
733 | static int __smp_thread_id(void) | |
734 | { | |
735 | int i; | |
736 | thread_id_t tid = pthread_self(); | |
737 | ||
738 | for (i = 0; i < NR_THREADS; i++) { | |
739 | if (__thread_id_map[i] == tid) { | |
740 | long v = i + 1; /* must be non-NULL. */ | |
741 | ||
742 | if (pthread_setspecific(thread_id_key, (void *)v) != 0) { | |
743 | perror("pthread_setspecific"); | |
744 | exit(-1); | |
745 | } | |
746 | return i; | |
747 | } | |
748 | } | |
749 | spin_lock(&__thread_id_map_mutex); |
750 | for (i = 0; i < NR_THREADS; i++) { |
751 | if (__thread_id_map[i] == tid) { |
752 | spin_unlock(&__thread_id_map_mutex); |
753 | return i; |
754 | } |
 | } |
755 | spin_unlock(&__thread_id_map_mutex); |
756 | fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n", |
757 | (int)tid, (int)tid); | |
758 | exit(-1); |
759 | } | |
760 | ||
761 | static int smp_thread_id(void) | |
762 | { | |
763 | void *id; | |
764 | ||
765 | id = pthread_getspecific(thread_id_key); | |
766 | if (id == NULL) | |
767 | return __smp_thread_id(); | |
768 | return (long)id - 1; /* stored as i + 1 above; avoids void-pointer arithmetic */ |
769 | } | |
770 | ||
771 | static thread_id_t create_thread(void *(*func)(void *), void *arg) | |
772 | { | |
773 | thread_id_t tid; | |
774 | int i; | |
775 | ||
776 | spin_lock(&__thread_id_map_mutex); | |
777 | for (i = 0; i < NR_THREADS; i++) { | |
778 | if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY) | |
779 | break; | |
780 | } | |
781 | if (i >= NR_THREADS) { | |
782 | spin_unlock(&__thread_id_map_mutex); | |
783 | fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS); | |
784 | exit(-1); | |
785 | } | |
786 | __thread_id_map[i] = __THREAD_ID_MAP_WAITING; | |
787 | spin_unlock(&__thread_id_map_mutex); | |
788 | if (pthread_create(&tid, NULL, func, arg) != 0) { | |
789 | perror("create_thread:pthread_create"); | |
790 | exit(-1); | |
791 | } | |
792 | __thread_id_map[i] = tid; | |
793 | return tid; | |
794 | } | |
795 | ||
796 | static void *wait_thread(thread_id_t tid) | |
797 | { | |
798 | int i; | |
799 | void *vp; | |
800 | ||
801 | for (i = 0; i < NR_THREADS; i++) { | |
802 | if (__thread_id_map[i] == tid) | |
803 | break; | |
804 | } | |
805 | if (i >= NR_THREADS){ | |
806 | fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n", |
807 | (int)tid, (int)tid); | |
808 | exit(-1); |
809 | } | |
810 | if (pthread_join(tid, &vp) != 0) { | |
811 | perror("wait_thread:pthread_join"); | |
812 | exit(-1); | |
813 | } | |
814 | __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; | |
815 | return vp; | |
816 | } | |
817 | ||
818 | static void wait_all_threads(void) | |
819 | { | |
820 | int i; | |
821 | thread_id_t tid; | |
822 | ||
823 | for (i = 1; i < NR_THREADS; i++) { | |
824 | tid = __thread_id_map[i]; | |
825 | if (tid != __THREAD_ID_MAP_EMPTY && | |
826 | tid != __THREAD_ID_MAP_WAITING) | |
827 | (void)wait_thread(tid); | |
828 | } | |
829 | } | |
830 | ||
831 | static void run_on(int cpu) | |
832 | { | |
833 | cpu_set_t mask; | |
834 | ||
835 | CPU_ZERO(&mask); | |
836 | CPU_SET(cpu, &mask); | |
837 | sched_setaffinity(0, sizeof(mask), &mask); | |
838 | } | |
839 | ||
840 | /* | |
841 | * timekeeping -- very crude -- should use MONOTONIC... | |
842 | */ | |
843 | ||
844 | static long long get_microseconds(void) |
845 | { | |
846 | struct timeval tv; | |
847 | ||
848 | if (gettimeofday(&tv, NULL) != 0) | |
849 | abort(); | |
850 | return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec; | |
851 | } | |
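/*
 * Editorial usage sketch: bracket the region being timed; do_work() is
 * hypothetical.
 *
 *	long long t0 = get_microseconds();
 *	do_work();
 *	printf("elapsed: %lld us\n", get_microseconds() - t0);
 */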
852 | ||
853 | /* | |
854 | * Per-thread variables. | |
855 | */ | |
856 | ||
857 | #define DEFINE_PER_THREAD(type, name) \ | |
858 | struct { \ | |
859 | __typeof__(type) v \ | |
860 | __attribute__((__aligned__(CACHE_LINE_SIZE))); \ | |
861 | } __per_thread_##name[NR_THREADS]; | |
862 | #define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name) | |
863 | ||
864 | #define per_thread(name, thread) __per_thread_##name[thread].v | |
865 | #define __get_thread_var(name) per_thread(name, smp_thread_id()) | |
866 | ||
867 | #define init_per_thread(name, v) \ | |
868 | do { \ | |
869 | int __i_p_t_i; \ | |
870 | for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \ | |
871 | per_thread(name, __i_p_t_i) = v; \ | |
872 | } while (0) | |
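/*
 * Editorial usage sketch: a cache-aligned per-thread counter in the style
 * of perfbook's counting algorithms; each updater touches only its own
 * slot, and a reader sums all slots.
 *
 *	DEFINE_PER_THREAD(long, counter);
 *
 *	static void count_inc(void)
 *	{
 *		__get_thread_var(counter)++;
 *	}
 *
 *	static long count_read(void)
 *	{
 *		int t;
 *		long sum = 0;
 *
 *		for_each_thread(t)
 *			sum += per_thread(counter, t);
 *		return sum;
 *	}
 */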
873 | ||
874 | /* | |
875 | * CPU traversal primitives. | |
876 | */ | |
877 | ||
878 | #ifndef NR_CPUS | |
879 | #define NR_CPUS 16 | |
880 | #endif /* #ifndef NR_CPUS */ | |
881 | ||
882 | #define for_each_possible_cpu(cpu) \ | |
883 | for (cpu = 0; cpu < NR_CPUS; cpu++) | |
884 | #define for_each_online_cpu(cpu) \ | |
885 | for (cpu = 0; cpu < NR_CPUS; cpu++) | |
886 | ||
887 | /* | |
888 | * Per-CPU variables. | |
889 | */ | |
890 | ||
891 | #define DEFINE_PER_CPU(type, name) \ | |
892 | struct { \ | |
893 | __typeof__(type) v \ | |
894 | __attribute__((__aligned__(CACHE_LINE_SIZE))); \ | |
895 | } __per_cpu_##name[NR_CPUS] | |
896 | #define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name) | |
897 | ||
898 | DEFINE_PER_THREAD(int, smp_processor_id); | |
899 | ||
900 | #define per_cpu(name, thread) __per_cpu_##name[thread].v |
901 | #define __get_cpu_var(name) per_cpu(name, smp_processor_id()) | |
902 | ||
903 | #define init_per_cpu(name, v) \ | |
904 | do { \ | |
905 | int __i_p_c_i; \ | |
906 | for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \ | |
907 | per_cpu(name, __i_p_c_i) = v; \ | |
908 | } while (0) | |
909 | ||
910 | /* | |
911 | * CPU state checking (crowbarred). | |
912 | */ | |
913 | ||
914 | #define idle_cpu(cpu) 0 | |
915 | #define in_softirq() 1 | |
916 | #define hardirq_count() 0 | |
917 | #define PREEMPT_SHIFT 0 | |
918 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) | |
919 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) | |
920 | #define PREEMPT_BITS 8 | |
921 | #define SOFTIRQ_BITS 8 | |
922 | ||
923 | /* | |
924 | * CPU hotplug. | |
925 | */ | |
926 | ||
927 | struct notifier_block { | |
928 | int (*notifier_call)(struct notifier_block *, unsigned long, void *); | |
929 | struct notifier_block *next; | |
930 | int priority; | |
931 | }; | |
932 | ||
933 | #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ | |
934 | #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ | |
935 | #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ | |
936 | #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ | |
937 | #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ | |
938 | #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ | |
939 | #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, | |
940 | * not handling interrupts, soon dead */ | |
941 | #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug | |
942 | * lock is dropped */ | |
943 | ||
944 | /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend |
945 | * operation in progress | |
946 | */ | |
947 | #define CPU_TASKS_FROZEN 0x0010 | |
948 | ||
949 | #define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN) | |
950 | #define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN) | |
951 | #define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN) | |
952 | #define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) | |
953 | #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) | |
954 | #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) | |
955 | #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) | |
956 | ||
957 | /* Hibernation and suspend events */ | |
958 | #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ | |
959 | #define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ | |
960 | #define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */ | |
961 | #define PM_POST_SUSPEND 0x0004 /* Suspend finished */ | |
962 | #define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */ | |
963 | #define PM_POST_RESTORE 0x0006 /* Restore failed */ | |
964 | ||
965 | #define NOTIFY_DONE 0x0000 /* Don't care */ | |
966 | #define NOTIFY_OK 0x0001 /* Suits me */ | |
967 | #define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ | |
968 | #define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) | |
969 | /* Bad/Veto action */ | |
970 | /* | |
971 | * Clean way to return from the notifier and stop further calls. | |
972 | */ | |
973 | #define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK) | |
974 | ||
975 | /* | |
976 | * Bug checks. | |
977 | */ | |
978 | ||
979 | #define BUG_ON(c) do { if (c) abort(); } while (0) /* fires when the condition holds, as in the kernel */ |
980 | ||
981 | /* | |
982 | * Initialization -- Must be called before calling any primitives. | |
983 | */ | |
984 | ||
985 | static void smp_init(void) | |
986 | { | |
987 | int i; | |
988 | ||
989 | spin_lock_init(&__thread_id_map_mutex); | |
990 | __thread_id_map[0] = pthread_self(); | |
991 | for (i = 1; i < NR_THREADS; i++) | |
992 | __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; | |
993 | init_per_thread(smp_processor_id, 0); | |
994 | if (pthread_key_create(&thread_id_key, NULL) != 0) { | |
995 | perror("pthread_key_create"); | |
996 | exit(-1); | |
997 | } | |
998 | } | |
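/*
 * Editorial usage sketch: the intended calling sequence. smp_init() must
 * run before any other primitive in this file; the parent then creates
 * threads and reaps them all. my_thread() is hypothetical.
 *
 *	void *my_thread(void *arg)
 *	{
 *		printf("hello from thread %d\n", smp_thread_id());
 *		return NULL;
 *	}
 *
 *	int main(int argc, char *argv[])
 *	{
 *		smp_init();
 *		create_thread(my_thread, NULL);
 *		create_thread(my_thread, NULL);
 *		wait_all_threads();
 *		return 0;
 *	}
 */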
999 | ||
1000 | /* Taken from the Linux kernel source tree, so GPLv2-only!!! */ | |
1001 | ||
1002 | #ifndef _LINUX_LIST_H | |
1003 | #define _LINUX_LIST_H | |
1004 | ||
1005 | #define LIST_POISON1 ((void *) 0x00100100) | |
1006 | #define LIST_POISON2 ((void *) 0x00200200) | |
1007 | ||
1008 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) | |
1009 | #define container_of(ptr, type, member) ({ \ | |
1010 | const typeof( ((type *)0)->member ) *__mptr = (ptr); \ | |
1011 | (type *)( (char *)__mptr - offsetof(type,member) );}) | |
1012 | ||
 | /* The kernel's prefetch() hint has no counterpart here; make it a no-op |
 |  * that still evaluates its argument so the iteration macros below |
 |  * compile in this pthreads environment. */ |
 | #ifndef prefetch |
 | #define prefetch(x) ((void)(x)) |
 | #endif |
 | ||
1013 | /* | |
1014 | * Simple doubly linked list implementation. | |
1015 | * | |
1016 | * Some of the internal functions ("__xxx") are useful when | |
1017 | * manipulating whole lists rather than single entries, as | |
1018 | * sometimes we already know the next/prev entries and we can | |
1019 | * generate better code by using them directly rather than | |
1020 | * using the generic single-entry routines. | |
1021 | */ | |
1022 | ||
1023 | struct list_head { | |
1024 | struct list_head *next, *prev; | |
1025 | }; | |
1026 | ||
1027 | #define LIST_HEAD_INIT(name) { &(name), &(name) } | |
1028 | ||
1029 | #define LIST_HEAD(name) \ | |
1030 | struct list_head name = LIST_HEAD_INIT(name) | |
1031 | ||
1032 | static inline void INIT_LIST_HEAD(struct list_head *list) | |
1033 | { | |
1034 | list->next = list; | |
1035 | list->prev = list; | |
1036 | } | |
1037 | ||
1038 | /* | |
1039 | * Insert a new entry between two known consecutive entries. | |
1040 | * | |
1041 | * This is only for internal list manipulation where we know | |
1042 | * the prev/next entries already! | |
1043 | */ | |
1044 | #ifndef CONFIG_DEBUG_LIST | |
1045 | static inline void __list_add(struct list_head *new, | |
1046 | struct list_head *prev, | |
1047 | struct list_head *next) | |
1048 | { | |
1049 | next->prev = new; | |
1050 | new->next = next; | |
1051 | new->prev = prev; | |
1052 | prev->next = new; | |
1053 | } | |
1054 | #else | |
1055 | extern void __list_add(struct list_head *new, | |
1056 | struct list_head *prev, | |
1057 | struct list_head *next); | |
1058 | #endif | |
1059 | ||
1060 | /** | |
1061 | * list_add - add a new entry | |
1062 | * @new: new entry to be added | |
1063 | * @head: list head to add it after | |
1064 | * | |
1065 | * Insert a new entry after the specified head. | |
1066 | * This is good for implementing stacks. | |
1067 | */ | |
1068 | static inline void list_add(struct list_head *new, struct list_head *head) | |
1069 | { | |
1070 | __list_add(new, head, head->next); | |
1071 | } | |
1072 | ||
1073 | ||
1074 | /** | |
1075 | * list_add_tail - add a new entry | |
1076 | * @new: new entry to be added | |
1077 | * @head: list head to add it before | |
1078 | * | |
1079 | * Insert a new entry before the specified head. | |
1080 | * This is useful for implementing queues. | |
1081 | */ | |
1082 | static inline void list_add_tail(struct list_head *new, struct list_head *head) | |
1083 | { | |
1084 | __list_add(new, head->prev, head); | |
1085 | } | |
1086 | ||
1087 | /* | |
1088 | * Delete a list entry by making the prev/next entries | |
1089 | * point to each other. | |
1090 | * | |
1091 | * This is only for internal list manipulation where we know | |
1092 | * the prev/next entries already! | |
1093 | */ | |
1094 | static inline void __list_del(struct list_head * prev, struct list_head * next) | |
1095 | { | |
1096 | next->prev = prev; | |
1097 | prev->next = next; | |
1098 | } | |
1099 | ||
1100 | /** | |
1101 | * list_del - deletes entry from list. | |
1102 | * @entry: the element to delete from the list. | |
1103 | * Note: list_empty() on entry does not return true after this; the entry is |
1104 | * in an undefined state. | |
1105 | */ | |
1106 | #ifndef CONFIG_DEBUG_LIST | |
1107 | static inline void list_del(struct list_head *entry) | |
1108 | { | |
1109 | __list_del(entry->prev, entry->next); | |
1110 | entry->next = LIST_POISON1; | |
1111 | entry->prev = LIST_POISON2; | |
1112 | } | |
1113 | #else | |
1114 | extern void list_del(struct list_head *entry); | |
1115 | #endif | |
1116 | ||
1117 | /** | |
1118 | * list_replace - replace old entry by new one | |
1119 | * @old : the element to be replaced | |
1120 | * @new : the new element to insert | |
1121 | * | |
1122 | * If @old was empty, it will be overwritten. | |
1123 | */ | |
1124 | static inline void list_replace(struct list_head *old, | |
1125 | struct list_head *new) | |
1126 | { | |
1127 | new->next = old->next; | |
1128 | new->next->prev = new; | |
1129 | new->prev = old->prev; | |
1130 | new->prev->next = new; | |
1131 | } | |
1132 | ||
1133 | static inline void list_replace_init(struct list_head *old, | |
1134 | struct list_head *new) | |
1135 | { | |
1136 | list_replace(old, new); | |
1137 | INIT_LIST_HEAD(old); | |
1138 | } | |
1139 | ||
1140 | /** | |
1141 | * list_del_init - deletes entry from list and reinitialize it. | |
1142 | * @entry: the element to delete from the list. | |
1143 | */ | |
1144 | static inline void list_del_init(struct list_head *entry) | |
1145 | { | |
1146 | __list_del(entry->prev, entry->next); | |
1147 | INIT_LIST_HEAD(entry); | |
1148 | } | |
1149 | ||
1150 | /** | |
1151 | * list_move - delete from one list and add as another's head | |
1152 | * @list: the entry to move | |
1153 | * @head: the head that will precede our entry | |
1154 | */ | |
1155 | static inline void list_move(struct list_head *list, struct list_head *head) | |
1156 | { | |
1157 | __list_del(list->prev, list->next); | |
1158 | list_add(list, head); | |
1159 | } | |
1160 | ||
1161 | /** | |
1162 | * list_move_tail - delete from one list and add as another's tail | |
1163 | * @list: the entry to move | |
1164 | * @head: the head that will follow our entry | |
1165 | */ | |
1166 | static inline void list_move_tail(struct list_head *list, | |
1167 | struct list_head *head) | |
1168 | { | |
1169 | __list_del(list->prev, list->next); | |
1170 | list_add_tail(list, head); | |
1171 | } | |
1172 | ||
1173 | /** | |
1174 | * list_is_last - tests whether @list is the last entry in list @head | |
1175 | * @list: the entry to test | |
1176 | * @head: the head of the list | |
1177 | */ | |
1178 | static inline int list_is_last(const struct list_head *list, | |
1179 | const struct list_head *head) | |
1180 | { | |
1181 | return list->next == head; | |
1182 | } | |
1183 | ||
1184 | /** | |
1185 | * list_empty - tests whether a list is empty | |
1186 | * @head: the list to test. | |
1187 | */ | |
1188 | static inline int list_empty(const struct list_head *head) | |
1189 | { | |
1190 | return head->next == head; | |
1191 | } | |
1192 | ||
1193 | /** | |
1194 | * list_empty_careful - tests whether a list is empty and not being modified | |
1195 | * @head: the list to test | |
1196 | * | |
1197 | * Description: | |
1198 | * tests whether a list is empty _and_ checks that no other CPU might be | |
1199 | * in the process of modifying either member (next or prev) | |
1200 | * | |
1201 | * NOTE: using list_empty_careful() without synchronization | |
1202 | * can only be safe if the only activity that can happen | |
1203 | * to the list entry is list_del_init(). Eg. it cannot be used | |
1204 | * if another CPU could re-list_add() it. | |
1205 | */ | |
1206 | static inline int list_empty_careful(const struct list_head *head) | |
1207 | { | |
1208 | struct list_head *next = head->next; | |
1209 | return (next == head) && (next == head->prev); | |
1210 | } | |
1211 | ||
1212 | /** | |
1213 | * list_is_singular - tests whether a list has just one entry. | |
1214 | * @head: the list to test. | |
1215 | */ | |
1216 | static inline int list_is_singular(const struct list_head *head) | |
1217 | { | |
1218 | return !list_empty(head) && (head->next == head->prev); | |
1219 | } | |
1220 | ||
1221 | static inline void __list_cut_position(struct list_head *list, | |
1222 | struct list_head *head, struct list_head *entry) | |
1223 | { | |
1224 | struct list_head *new_first = entry->next; | |
1225 | list->next = head->next; | |
1226 | list->next->prev = list; | |
1227 | list->prev = entry; | |
1228 | entry->next = list; | |
1229 | head->next = new_first; | |
1230 | new_first->prev = head; | |
1231 | } | |
1232 | ||
1233 | /** | |
1234 | * list_cut_position - cut a list into two | |
1235 | * @list: a new list to add all removed entries | |
1236 | * @head: a list with entries | |
1237 | * @entry: an entry within head, could be the head itself | |
1238 | * and if so we won't cut the list | |
1239 | * | |
1240 | * This helper moves the initial part of @head, up to and | |
1241 | * including @entry, from @head to @list. You should | |
1242 | * pass on @entry an element you know is on @head. @list | |
1243 | * should be an empty list or a list you do not care about | |
1244 | * losing its data. | |
1245 | * | |
1246 | */ | |
1247 | static inline void list_cut_position(struct list_head *list, | |
1248 | struct list_head *head, struct list_head *entry) | |
1249 | { | |
1250 | if (list_empty(head)) | |
1251 | return; | |
1252 | if (list_is_singular(head) && | |
1253 | (head->next != entry && head != entry)) | |
1254 | return; | |
1255 | if (entry == head) | |
1256 | INIT_LIST_HEAD(list); | |
1257 | else | |
1258 | __list_cut_position(list, head, entry); | |
1259 | } | |
1260 | ||
1261 | static inline void __list_splice(const struct list_head *list, | |
1262 | struct list_head *prev, | |
1263 | struct list_head *next) | |
1264 | { | |
1265 | struct list_head *first = list->next; | |
1266 | struct list_head *last = list->prev; | |
1267 | ||
1268 | first->prev = prev; | |
1269 | prev->next = first; | |
1270 | ||
1271 | last->next = next; | |
1272 | next->prev = last; | |
1273 | } | |
1274 | ||
1275 | /** | |
1276 | * list_splice - join two lists, this is designed for stacks | |
1277 | * @list: the new list to add. | |
1278 | * @head: the place to add it in the first list. | |
1279 | */ | |
1280 | static inline void list_splice(const struct list_head *list, | |
1281 | struct list_head *head) | |
1282 | { | |
1283 | if (!list_empty(list)) | |
1284 | __list_splice(list, head, head->next); | |
1285 | } | |
1286 | ||
1287 | /** | |
1288 | * list_splice_tail - join two lists, each list being a queue | |
1289 | * @list: the new list to add. | |
1290 | * @head: the place to add it in the first list. | |
1291 | */ | |
1292 | static inline void list_splice_tail(struct list_head *list, | |
1293 | struct list_head *head) | |
1294 | { | |
1295 | if (!list_empty(list)) | |
1296 | __list_splice(list, head->prev, head); | |
1297 | } | |
1298 | ||
1299 | /** | |
1300 | * list_splice_init - join two lists and reinitialise the emptied list. | |
1301 | * @list: the new list to add. | |
1302 | * @head: the place to add it in the first list. | |
1303 | * | |
1304 | * The list at @list is reinitialised | |
1305 | */ | |
1306 | static inline void list_splice_init(struct list_head *list, | |
1307 | struct list_head *head) | |
1308 | { | |
1309 | if (!list_empty(list)) { | |
1310 | __list_splice(list, head, head->next); | |
1311 | INIT_LIST_HEAD(list); | |
1312 | } | |
1313 | } | |
1314 | ||
1315 | /** | |
1316 | * list_splice_tail_init - join two lists and reinitialise the emptied list | |
1317 | * @list: the new list to add. | |
1318 | * @head: the place to add it in the first list. | |
1319 | * | |
1320 | * Each of the lists is a queue. | |
1321 | * The list at @list is reinitialised | |
1322 | */ | |
1323 | static inline void list_splice_tail_init(struct list_head *list, | |
1324 | struct list_head *head) | |
1325 | { | |
1326 | if (!list_empty(list)) { | |
1327 | __list_splice(list, head->prev, head); | |
1328 | INIT_LIST_HEAD(list); | |
1329 | } | |
1330 | } | |
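/*
 * Editorial usage sketch: list_splice_init() is the usual way to drain a
 * shared queue -- move the whole list to a private head under the lock,
 * then walk it without the lock. pending, pending_lock, and
 * process_items() are hypothetical.
 *
 *	LIST_HEAD(pending);
 *	DEFINE_SPINLOCK(pending_lock);
 *
 *	static void drain(void)
 *	{
 *		LIST_HEAD(todo);
 *
 *		spin_lock(&pending_lock);
 *		list_splice_init(&pending, &todo);
 *		spin_unlock(&pending_lock);
 *		process_items(&todo);
 *	}
 */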
1331 | ||
1332 | /** | |
1333 | * list_entry - get the struct for this entry | |
1334 | * @ptr: the &struct list_head pointer. | |
1335 | * @type: the type of the struct this is embedded in. | |
1336 | * @member: the name of the list_struct within the struct. | |
1337 | */ | |
1338 | #define list_entry(ptr, type, member) \ | |
1339 | container_of(ptr, type, member) | |
1340 | ||
1341 | /** | |
1342 | * list_first_entry - get the first element from a list | |
1343 | * @ptr: the list head to take the element from. | |
1344 | * @type: the type of the struct this is embedded in. | |
1345 | * @member: the name of the list_struct within the struct. | |
1346 | * | |
1347 | * Note that the list is expected to be non-empty. |
1348 | */ | |
1349 | #define list_first_entry(ptr, type, member) \ | |
1350 | list_entry((ptr)->next, type, member) | |
1351 | ||
1352 | /** | |
1353 | * list_for_each - iterate over a list | |
1354 | * @pos: the &struct list_head to use as a loop cursor. | |
1355 | * @head: the head for your list. | |
1356 | */ | |
1357 | #define list_for_each(pos, head) \ | |
1358 | for (pos = (head)->next; prefetch(pos->next), pos != (head); \ | |
1359 | pos = pos->next) | |
1360 | ||
1361 | /** | |
1362 | * __list_for_each - iterate over a list | |
1363 | * @pos: the &struct list_head to use as a loop cursor. | |
1364 | * @head: the head for your list. | |
1365 | * | |
1366 | * This variant differs from list_for_each() in that it's the | |
1367 | * simplest possible list iteration code, no prefetching is done. | |
1368 | * Use this for code that knows the list to be very short (empty | |
1369 | * or 1 entry) most of the time. | |
1370 | */ | |
1371 | #define __list_for_each(pos, head) \ | |
1372 | for (pos = (head)->next; pos != (head); pos = pos->next) | |
1373 | ||
1374 | /** | |
1375 | * list_for_each_prev - iterate over a list backwards | |
1376 | * @pos: the &struct list_head to use as a loop cursor. | |
1377 | * @head: the head for your list. | |
1378 | */ | |
1379 | #define list_for_each_prev(pos, head) \ | |
1380 | for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ | |
1381 | pos = pos->prev) | |
1382 | ||
1383 | /** | |
1384 | * list_for_each_safe - iterate over a list safe against removal of list entry | |
1385 | * @pos: the &struct list_head to use as a loop cursor. | |
1386 | * @n: another &struct list_head to use as temporary storage | |
1387 | * @head: the head for your list. | |
1388 | */ | |
1389 | #define list_for_each_safe(pos, n, head) \ | |
1390 | for (pos = (head)->next, n = pos->next; pos != (head); \ | |
1391 | pos = n, n = pos->next) | |
1392 | ||
1393 | /** | |
1394 | * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry | |
1395 | * @pos: the &struct list_head to use as a loop cursor. | |
1396 | * @n: another &struct list_head to use as temporary storage | |
1397 | * @head: the head for your list. | |
1398 | */ | |
1399 | #define list_for_each_prev_safe(pos, n, head) \ | |
1400 | for (pos = (head)->prev, n = pos->prev; \ | |
1401 | prefetch(pos->prev), pos != (head); \ | |
1402 | pos = n, n = pos->prev) | |
1403 | ||
1404 | /** | |
1405 | * list_for_each_entry - iterate over list of given type | |
1406 | * @pos: the type * to use as a loop cursor. | |
1407 | * @head: the head for your list. | |
1408 | * @member: the name of the list_struct within the struct. | |
1409 | */ | |
1410 | #define list_for_each_entry(pos, head, member) \ | |
1411 | for (pos = list_entry((head)->next, typeof(*pos), member); \ | |
1412 | prefetch(pos->member.next), &pos->member != (head); \ | |
1413 | pos = list_entry(pos->member.next, typeof(*pos), member)) | |
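/*
 * Editorial usage sketch, reusing the hypothetical struct item from the
 * container_of() example above: list_for_each_entry() hides the
 * container_of() arithmetic behind a typed cursor.
 *
 *	LIST_HEAD(items);
 *
 *	static void show_items(void)
 *	{
 *		struct item *ip;
 *
 *		list_for_each_entry(ip, &items, link)
 *			printf("key = %d\n", ip->key);
 *	}
 */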
1414 | ||
1415 | /** | |
1416 | * list_for_each_entry_reverse - iterate backwards over list of given type. | |
1417 | * @pos: the type * to use as a loop cursor. | |
1418 | * @head: the head for your list. | |
1419 | * @member: the name of the list_struct within the struct. | |
1420 | */ | |
1421 | #define list_for_each_entry_reverse(pos, head, member) \ | |
1422 | for (pos = list_entry((head)->prev, typeof(*pos), member); \ | |
1423 | prefetch(pos->member.prev), &pos->member != (head); \ | |
1424 | pos = list_entry(pos->member.prev, typeof(*pos), member)) | |
1425 | ||
1426 | /** | |
1427 | * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() | |
1428 | * @pos: the type * to use as a start point | |
1429 | * @head: the head of the list | |
1430 | * @member: the name of the list_struct within the struct. | |
1431 | * | |
1432 | * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). | |
1433 | */ | |
1434 | #define list_prepare_entry(pos, head, member) \ | |
1435 | ((pos) ? : list_entry(head, typeof(*pos), member)) | |
1436 | ||
1437 | /** | |
1438 | * list_for_each_entry_continue - continue iteration over list of given type | |
1439 | * @pos: the type * to use as a loop cursor. | |
1440 | * @head: the head for your list. | |
1441 | * @member: the name of the list_struct within the struct. | |
1442 | * | |
1443 | * Continue to iterate over list of given type, continuing after | |
1444 | * the current position. | |
1445 | */ | |
1446 | #define list_for_each_entry_continue(pos, head, member) \ | |
1447 | for (pos = list_entry(pos->member.next, typeof(*pos), member); \ | |
1448 | prefetch(pos->member.next), &pos->member != (head); \ | |
1449 | pos = list_entry(pos->member.next, typeof(*pos), member)) | |
1450 | ||
1451 | /** | |
1452 | * list_for_each_entry_continue_reverse - iterate backwards from the given point | |
1453 | * @pos: the type * to use as a loop cursor. | |
1454 | * @head: the head for your list. | |
1455 | * @member: the name of the list_struct within the struct. | |
1456 | * | |
1457 | * Start to iterate over list of given type backwards, continuing after | |
1458 | * the current position. | |
1459 | */ | |
1460 | #define list_for_each_entry_continue_reverse(pos, head, member) \ | |
1461 | for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ | |
1462 | prefetch(pos->member.prev), &pos->member != (head); \ | |
1463 | pos = list_entry(pos->member.prev, typeof(*pos), member)) | |
1464 | ||
1465 | /** | |
1466 | * list_for_each_entry_from - iterate over list of given type from the current point | |
1467 | * @pos: the type * to use as a loop cursor. | |
1468 | * @head: the head for your list. | |
1469 | * @member: the name of the list_struct within the struct. | |
1470 | * | |
1471 | * Iterate over list of given type, continuing from current position. | |
1472 | */ | |
1473 | #define list_for_each_entry_from(pos, head, member) \ | |
1474 | for (; prefetch(pos->member.next), &pos->member != (head); \ | |
1475 | pos = list_entry(pos->member.next, typeof(*pos), member)) | |
1476 | ||
1477 | /** | |
1478 | * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry | |
1479 | * @pos: the type * to use as a loop cursor. | |
1480 | * @n: another type * to use as temporary storage | |
1481 | * @head: the head for your list. | |
1482 | * @member: the name of the list_struct within the struct. | |
1483 | */ | |
1484 | #define list_for_each_entry_safe(pos, n, head, member) \ | |
1485 | for (pos = list_entry((head)->next, typeof(*pos), member), \ | |
1486 | n = list_entry(pos->member.next, typeof(*pos), member); \ | |
1487 | &pos->member != (head); \ | |
1488 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) | |
1489 | ||
1490 | /** | |
1491 | * list_for_each_entry_safe_continue | |
1492 | * @pos: the type * to use as a loop cursor. | |
1493 | * @n: another type * to use as temporary storage | |
1494 | * @head: the head for your list. | |
1495 | * @member: the name of the list_struct within the struct. | |
1496 | * | |
1497 | * Iterate over list of given type, continuing after current point, | |
1498 | * safe against removal of list entry. | |
1499 | */ | |
1500 | #define list_for_each_entry_safe_continue(pos, n, head, member) \ | |
1501 | for (pos = list_entry(pos->member.next, typeof(*pos), member), \ | |
1502 | n = list_entry(pos->member.next, typeof(*pos), member); \ | |
1503 | &pos->member != (head); \ | |
1504 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) | |
1505 | ||
1506 | /** | |
1507 | * list_for_each_entry_safe_from | |
1508 | * @pos: the type * to use as a loop cursor. | |
1509 | * @n: another type * to use as temporary storage | |
1510 | * @head: the head for your list. | |
1511 | * @member: the name of the list_struct within the struct. | |
1512 | * | |
1513 | * Iterate over list of given type from current point, safe against | |
1514 | * removal of list entry. | |
1515 | */ | |
1516 | #define list_for_each_entry_safe_from(pos, n, head, member) \ | |
1517 | for (n = list_entry(pos->member.next, typeof(*pos), member); \ | |
1518 | &pos->member != (head); \ | |
1519 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) | |
1520 | ||
1521 | /** | |
1522 | * list_for_each_entry_safe_reverse | |
1523 | * @pos: the type * to use as a loop cursor. | |
1524 | * @n: another type * to use as temporary storage | |
1525 | * @head: the head for your list. | |
1526 | * @member: the name of the list_struct within the struct. | |
1527 | * | |
1528 | * Iterate backwards over list of given type, safe against removal | |
1529 | * of list entry. | |
1530 | */ | |
1531 | #define list_for_each_entry_safe_reverse(pos, n, head, member) \ | |
1532 | for (pos = list_entry((head)->prev, typeof(*pos), member), \ | |
1533 | n = list_entry(pos->member.prev, typeof(*pos), member); \ | |
1534 | &pos->member != (head); \ | |
1535 | pos = n, n = list_entry(n->member.prev, typeof(*n), member)) | |
1536 | ||
1537 | /* | |
1538 | * Double linked lists with a single pointer list head. | |
1539 | * Mostly useful for hash tables where the two pointer list head is | |
1540 | * too wasteful. | |
1541 | * You lose the ability to access the tail in O(1). | |
1542 | */ | |
1543 | ||
1544 | struct hlist_head { | |
1545 | struct hlist_node *first; | |
1546 | }; | |
1547 | ||
1548 | struct hlist_node { | |
1549 | struct hlist_node *next, **pprev; | |
1550 | }; | |
1551 | ||
1552 | #define HLIST_HEAD_INIT { .first = NULL } | |
1553 | #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } | |
1554 | #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) | |
1555 | static inline void INIT_HLIST_NODE(struct hlist_node *h) | |
1556 | { | |
1557 | h->next = NULL; | |
1558 | h->pprev = NULL; | |
1559 | } | |
1560 | ||
1561 | static inline int hlist_unhashed(const struct hlist_node *h) | |
1562 | { | |
1563 | return !h->pprev; | |
1564 | } | |
1565 | ||
1566 | static inline int hlist_empty(const struct hlist_head *h) | |
1567 | { | |
1568 | return !h->first; | |
1569 | } | |
1570 | ||
1571 | static inline void __hlist_del(struct hlist_node *n) | |
1572 | { | |
1573 | struct hlist_node *next = n->next; | |
1574 | struct hlist_node **pprev = n->pprev; | |
1575 | *pprev = next; | |
1576 | if (next) | |
1577 | next->pprev = pprev; | |
1578 | } | |
1579 | ||
1580 | static inline void hlist_del(struct hlist_node *n) | |
1581 | { | |
1582 | __hlist_del(n); | |
1583 | n->next = LIST_POISON1; | |
1584 | n->pprev = LIST_POISON2; | |
1585 | } | |
1586 | ||
1587 | static inline void hlist_del_init(struct hlist_node *n) | |
1588 | { | |
1589 | if (!hlist_unhashed(n)) { | |
1590 | __hlist_del(n); | |
1591 | INIT_HLIST_NODE(n); | |
1592 | } | |
1593 | } | |
1594 | ||
1595 | static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) | |
1596 | { | |
1597 | struct hlist_node *first = h->first; | |
1598 | n->next = first; | |
1599 | if (first) | |
1600 | first->pprev = &n->next; | |
1601 | h->first = n; | |
1602 | n->pprev = &h->first; | |
1603 | } | |
1604 | ||
1605 | /* next must be != NULL */ | |
1606 | static inline void hlist_add_before(struct hlist_node *n, | |
1607 | struct hlist_node *next) | |
1608 | { | |
1609 | n->pprev = next->pprev; | |
1610 | n->next = next; | |
1611 | next->pprev = &n->next; | |
1612 | *(n->pprev) = n; | |
1613 | } | |
1614 | ||
1615 | static inline void hlist_add_after(struct hlist_node *n, | |
1616 | struct hlist_node *next) | |
1617 | { | |
1618 | next->next = n->next; | |
1619 | n->next = next; | |
1620 | next->pprev = &n->next; | |
1621 | ||
1622 | if (next->next) |
1623 | next->next->pprev = &next->next; | |
1624 | } | |
1625 | ||
1626 | /* | |
1627 | * Move a list from one list head to another. Fixup the pprev | |
1628 | * reference of the first entry if it exists. | |
1629 | */ | |
1630 | static inline void hlist_move_list(struct hlist_head *old, | |
1631 | struct hlist_head *new) | |
1632 | { | |
1633 | new->first = old->first; | |
1634 | if (new->first) | |
1635 | new->first->pprev = &new->first; | |
1636 | old->first = NULL; | |
1637 | } | |
1638 | ||
1639 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) | |
1640 | ||
1641 | #define hlist_for_each(pos, head) \ | |
1642 | for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ | |
1643 | pos = pos->next) | |
1644 | ||
1645 | #define hlist_for_each_safe(pos, n, head) \ | |
1646 | for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ | |
1647 | pos = n) | |
1648 | ||
1649 | /** | |
1650 | * hlist_for_each_entry - iterate over list of given type | |
1651 | * @tpos: the type * to use as a loop cursor. | |
1652 | * @pos: the &struct hlist_node to use as a loop cursor. | |
1653 | * @head: the head for your list. | |
1654 | * @member: the name of the hlist_node within the struct. | |
1655 | */ | |
1656 | #define hlist_for_each_entry(tpos, pos, head, member) \ | |
1657 | for (pos = (head)->first; \ | |
1658 | pos && ({ prefetch(pos->next); 1;}) && \ | |
1659 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | |
1660 | pos = pos->next) | |
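/*
 * Editorial usage sketch: a single-pointer hlist_head halves the size of
 * a hash-table bucket array, which is the point of the hlist variant.
 * The table and struct hitem are hypothetical.
 *
 *	#define NBUCKETS 1024
 *	struct hlist_head buckets[NBUCKETS];	(zero-initialized == empty)
 *
 *	struct hitem {
 *		unsigned long key;
 *		struct hlist_node node;
 *	};
 *
 *	static void bucket_insert(struct hitem *p)
 *	{
 *		hlist_add_head(&p->node, &buckets[p->key % NBUCKETS]);
 *	}
 */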
1661 | ||
1662 | /** | |
1663 | * hlist_for_each_entry_continue - iterate over a hlist continuing after current point | |
1664 | * @tpos: the type * to use as a loop cursor. | |
1665 | * @pos: the &struct hlist_node to use as a loop cursor. | |
1666 | * @member: the name of the hlist_node within the struct. | |
1667 | */ | |
1668 | #define hlist_for_each_entry_continue(tpos, pos, member) \ | |
1669 | for (pos = (pos)->next; \ | |
1670 | pos && ({ prefetch(pos->next); 1;}) && \ | |
1671 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | |
1672 | pos = pos->next) | |
1673 | ||
1674 | /** | |
1675 | * hlist_for_each_entry_from - iterate over a hlist continuing from current point | |
1676 | * @tpos: the type * to use as a loop cursor. | |
1677 | * @pos: the &struct hlist_node to use as a loop cursor. | |
1678 | * @member: the name of the hlist_node within the struct. | |
1679 | */ | |
1680 | #define hlist_for_each_entry_from(tpos, pos, member) \ | |
1681 | for (; pos && ({ prefetch(pos->next); 1;}) && \ | |
1682 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | |
1683 | pos = pos->next) | |
1684 | ||
1685 | /** | |
1686 | * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry | |
1687 | * @tpos: the type * to use as a loop cursor. | |
1688 | * @pos: the &struct hlist_node to use as a loop cursor. | |
1689 | * @n: another &struct hlist_node to use as temporary storage | |
1690 | * @head: the head for your list. | |
1691 | * @member: the name of the hlist_node within the struct. | |
1692 | */ | |
1693 | #define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ | |
1694 | for (pos = (head)->first; \ | |
1695 | pos && ({ n = pos->next; 1; }) && \ | |
1696 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | |
1697 | pos = n) | |
1698 | ||
1699 | #endif |