794c9fc7121f48f12801633ccb804d834826ceda
[urcu.git] / arch_ppc.h
1 #ifndef _ARCH_PPC_H
2 #define _ARCH_PPC_H
3
4 /*
5 * arch_ppc.h: trivial definitions for the powerpc architecture.
6 *
7 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
8 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
#include <compiler.h>

/* PowerPC provides explicit fence instructions (sync/lwsync/isync). */
#define CONFIG_HAVE_FENCE 1
/* Caches are coherent across CPUs: memory barriers suffice, no cache flush. */
#define CONFIG_HAVE_MEM_COHERENCY

/* Width of a long in bits, derived from the compiler when not preset. */
#ifndef BITS_PER_LONG
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#endif

/*
 * Full hardware memory barriers.  "sync" orders all prior loads and stores
 * before all subsequent ones; it is used here for mb(), rmb() and wmb()
 * alike (conservative: heavier than strictly required for rmb/wmb).
 */
#define mb() asm volatile("sync":::"memory")
#define rmb() asm volatile("sync":::"memory")
#define wmb() asm volatile("sync"::: "memory")
37
/*
 * Architectures without cache coherency need something like the following:
 *
 * #define mb() mc()
 * #define rmb() rmc()
 * #define wmb() wmc()
 * #define mc() arch_cache_flush()
 * #define rmc() arch_cache_flush_read()
 * #define wmc() arch_cache_flush_write()
 */

/*
 * PowerPC is cache-coherent (CONFIG_HAVE_MEM_COHERENCY above), so the
 * cache-flush primitives reduce to plain compiler barriers.
 */
#define mc() barrier()
#define rmc() barrier()
#define wmc() barrier()

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1
55
#ifdef CONFIG_SMP
/* SMP: the smp_* variants must emit real hardware barriers. */
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_mc() mc()
#define smp_rmc() rmc()
#define smp_wmc() wmc()
#else
/* Uniprocessor: a compiler barrier is sufficient; no cross-CPU ordering. */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_mc() barrier()
#define smp_rmc() barrier()
#define smp_wmc() barrier()
#endif

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()
74
/*
 * Busy-wait loop body hint.  Only a compiler barrier is emitted here; no
 * PowerPC-specific low-priority hint (e.g. SMT thread-priority nops) is
 * used, so this merely prevents the compiler from optimizing the spin away.
 */
static inline void cpu_relax(void)
{
	barrier();
}
79
/* PPC405 erratum 77 workaround: deliberately a no-op here (see the
 * "might not work with PPC405" notes on the xchg helpers below). */
#define PPC405_ERR77(ra,rb)
/* Barriers placed around atomic sequences; always emitted since SMP is
 * assumed (CONFIG_SMP above): lwsync before, isync after. */
#define LWSYNC_ON_SMP "\n\tlwsync\n"
#define ISYNC_ON_SMP "\n\tisync\n"

/*
 * Oversized dummy type: casting a pointer through __xg() tells the
 * compiler the asm may touch a large region at that address, preventing
 * invalid optimizations around the atomic exchange.
 */
struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))
88
89 #ifndef _INCLUDE_API_H
90
/*
 * Exchange the 32-bits value pointed to by p, returns the old value.
 * Might not work with PPC405 (see err 77).
 */
static __always_inline
unsigned int __xchg_u32(volatile void *p, unsigned int val)
{
	unsigned int prev;

	/*
	 * lwarx/stwcx. load-reserve / store-conditional loop: retries
	 * ("bne- 1b") until the conditional store succeeds, i.e. no other
	 * CPU wrote the reservation granule in between.  lwsync before the
	 * sequence orders prior accesses; isync after prevents subsequent
	 * instructions from executing early (acquire-like ordering).
	 */
	__asm__ __volatile__(LWSYNC_ON_SMP
		"1:\t"	"lwarx %0,0,%2\n"
		"stwcx. %3,0,%2\n"
		"bne- 1b"
		ISYNC_ON_SMP
		: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
		: "r" (p), "r" (val)
		: "cc", "memory");
	return prev;
}
110
#if (BITS_PER_LONG == 64)
/*
 * Exchange the 64-bits value pointed to by p, returns the old value.
 * Might not work with PPC405 (see err 77).
 */
static __always_inline
unsigned long __xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	/*
	 * 64-bit variant of __xchg_u32: ldarx/stdcx. load-reserve /
	 * store-conditional loop, retried until the store succeeds, with
	 * the same lwsync/isync ordering around the sequence.
	 */
	__asm__ __volatile__(LWSYNC_ON_SMP
		"1:\t"	"ldarx %0,0,%2\n"
		"stdcx. %3,0,%2\n"
		"bne- 1b"
		ISYNC_ON_SMP
		: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
		: "r" (p), "r" (val)
		: "cc", "memory");
	return prev;
}
#endif
132
133 static __always_inline
134 unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
135 {
136 switch (size) {
137 case 4:
138 return __xchg_u32(ptr, x);
139 #if (BITS_PER_LONG == 64)
140 case 8:
141 return __xchg_u64(ptr, x);
142 #endif
143 }
144 return x;
145 }
146
/*
 * note: xchg should only be used with pointers to 32- or 64-bit elements.
 * No build-time check is done on the element size, because the usual trick
 * of referencing a nonexistent symbol at link time to produce an error
 * message only works when compiling with optimizations.
 */
#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(v), sizeof(*(ptr))))
155
156 #endif /* #ifndef _INCLUDE_API_H */
157
/* Read the lower 32 bits of the PowerPC timebase register (TBL). */
#define mftbl() \
	({ \
		unsigned long rval; \
		asm volatile("mftbl %0" : "=r" (rval)); \
		rval; \
	})

/* Read the upper 32 bits of the PowerPC timebase register (TBU). */
#define mftbu() \
	({ \
		unsigned long rval; \
		asm volatile("mftbu %0" : "=r" (rval)); \
		rval; \
	})
171
typedef unsigned long long cycles_t;

/*
 * Read the 64-bit timebase as two 32-bit halves, retrying until the upper
 * half is stable across the read of the lower half (i.e. no TBL->TBU
 * carry occurred mid-read).  The barrier() calls keep the compiler from
 * reordering the three mftbu/mftbl reads.
 *
 * Fix: h and l were previously signed "long".  On 32-bit builds, whenever
 * mftbl() returned a value with bit 31 set, the addition
 * (((cycles_t) h) << 32) + l sign-extended l to 64 bits and effectively
 * subtracted from the result.  Unsigned locals zero-extend correctly.
 */
static inline cycles_t get_cycles(void)
{
	unsigned long h, l;

	for (;;) {
		h = mftbu();
		barrier();
		l = mftbl();
		barrier();
		if (mftbu() == h)
			return (((cycles_t) h) << 32) + l;
	}
}
187
188 #endif /* _ARCH_PPC_H */
This page took 0.046502 seconds and 3 git commands to generate.