ppc update
[lttv.git] / ltt-usertrace / ltt / system-ppc.h
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <asm/atomic.h>
#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
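
/*
 * Illustrative sketch only (not part of the original header): how the
 * wmb()/rmb() pair above is typically used to publish data through a
 * flag.  The names "data", "ready", "producer" and "consumer" are
 * hypothetical.
 *
 *	static unsigned long data;
 *	static volatile int ready;
 *
 *	static void producer(void)
 *	{
 *		data = 42;
 *		wmb();		order the payload store before the flag store
 *		ready = 1;
 *	}
 *
 *	static unsigned long consumer(void)
 *	{
 *		while (!ready)
 *			;
 *		rmb();		order the flag load before the payload load
 *		return data;
 *	}
 */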

static inline unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0	/* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif	/* 0 */
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static inline void * xchg_ptr(void * m, void * val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}

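/*
 * Illustrative sketch only (not part of the original header): tas()
 * above can serve as a trivial test-and-set lock.  The names "lock",
 * "my_lock" and "my_unlock" are hypothetical.
 *
 *	static volatile unsigned int lock;
 *
 *	static void my_lock(void)
 *	{
 *		while (tas(&lock))	spin until the previous value was 0
 *			;
 *		smp_mb();		order the acquire before the critical section
 *	}
 *
 *	static void my_unlock(void)
 *	{
 *		smp_mb();		order the critical section before the release
 *		lock = 0;
 *	}
 */
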
#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned int prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#if 0	/* LTT uses only one CPU at a time; was "#ifdef CONFIG_SMP" */
"	sync\n"
#endif	/* 0 */
	"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif	/* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
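
/*
 * Illustrative sketch only (not part of the original header): a
 * lock-free add built on the cmpxchg() macro above.  The name
 * "add_return_cmpxchg" is hypothetical.
 *
 *	static unsigned int add_return_cmpxchg(volatile unsigned int *v,
 *					       unsigned int inc)
 *	{
 *		unsigned int old, new;
 *
 *		do {
 *			old = *v;
 *			new = old + inc;
 *		} while (cmpxchg(v, old, new) != old);
 *
 *		return new;
 *	}
 */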

#define arch_align_stack(x) (x)

#endif /* __PPC_SYSTEM_H */