#ifndef _ARCH_ATOMIC_PPC_H
#define _ARCH_ATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#ifndef __SIZEOF_LONG__
#ifdef __powerpc64__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

#define ILLEGAL_INSTR	".long 0xd00d00"

#ifndef _INCLUDE_API_H

#define atomic_set(addr, v)				\
do {							\
	ACCESS_ONCE(*(addr)) = (v);			\
} while (0)

#define atomic_read(addr)	ACCESS_ONCE(*(addr))

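/*
 * Illustrative example (not part of the original header): publishing and
 * polling a flag with atomic_set()/atomic_read(). The variable and function
 * names below are hypothetical.
 *
 *	static int ready;
 *
 *	void mark_ready(void)
 *	{
 *		atomic_set(&ready, 1);
 *	}
 *
 *	int is_ready(void)
 *	{
 *		return atomic_read(&ready);
 *	}
 *
 * These two macros only force the compiler to perform the access exactly
 * once (through ACCESS_ONCE); they do not imply any memory barrier.
 */
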
/*
 * Using an isync as second barrier for exchange to provide acquire semantics.
 * According to atomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
 * explicit that this also has acquire semantics."
 * Derived from AO_compare_and_swap(), but removed the comparison.
 */

/* xchg */

static inline __attribute__((always_inline))
unsigned long _atomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define xchg(addr, v)						\
	((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))

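/*
 * Illustrative example (not part of the original header): atomically
 * replacing a shared pointer and acting on the previous value. "struct item",
 * "pending" and free_item() are hypothetical names.
 *
 *	struct item *pending;
 *
 *	void publish(struct item *new_item)
 *	{
 *		struct item *prev;
 *
 *		prev = xchg(&pending, new_item);
 *		if (prev)
 *			free_item(prev);	// hypothetical cleanup helper
 *	}
 *
 * The lwsync before and the isync after the reservation loop give the
 * exchange release/acquire ordering with respect to surrounding accesses.
 */
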
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _atomic_cmpxchg(void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
			: "=&r"(old_val)
			: "r"(addr), "r"((unsigned int)_new),
			  "r"((unsigned int)old)
			: "memory", "cc");

		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
			: "=&r"(old_val)
			: "r"(addr), "r"((unsigned long)_new),
			  "r"((unsigned long)old)
			: "memory", "cc");

		return old_val;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define cmpxchg(addr, old, _new)					\
	((__typeof__(*(addr))) _atomic_cmpxchg((addr), (unsigned long)(old), \
					       (unsigned long)(_new),	\
					       sizeof(*(addr))))

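/*
 * Illustrative example (not part of the original header): the classic
 * compare-and-swap retry loop, here a conditional increment that refuses to
 * revive a zero reference count. "refcount" and the function name are
 * hypothetical.
 *
 *	static int refcount;
 *
 *	int refcount_inc_not_zero(void)
 *	{
 *		int old, ret;
 *
 *		do {
 *			old = atomic_read(&refcount);
 *			if (old == 0)
 *				return 0;	// object already dead
 *			ret = cmpxchg(&refcount, old, old + 1);
 *		} while (ret != old);
 *		return 1;
 *	}
 *
 * cmpxchg() returns the value found at the address: equal to "old" on
 * success, the conflicting value otherwise, which is what the loop tests.
 */
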
/* atomic_add_return */

static inline __attribute__((always_inline))
unsigned long _atomic_add_return(void *addr, unsigned long val,
				 int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define atomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _atomic_add_return((addr),		\
						  (unsigned long)(v),	\
						  sizeof(*(addr))))

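/*
 * Illustrative example (not part of the original header): using the value
 * returned by atomic_add_return() to act on a threshold crossing, e.g.
 * dropping the last reference. "refs" and release_object() are hypothetical.
 *
 *	static int refs;
 *
 *	void put_ref(void)
 *	{
 *		if (atomic_add_return(&refs, -1) == 0)
 *			release_object();	// hypothetical teardown
 *	}
 *
 * Unlike atomic_add(), the _return variant hands back the updated value, so
 * the "did this drop hit zero?" test is race-free.
 */
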
/* atomic_sub_return, atomic_add, atomic_sub, atomic_inc, atomic_dec */

#define atomic_sub_return(addr, v)	atomic_add_return((addr), -(v))

#define atomic_add(addr, v)		(void)atomic_add_return((addr), (v))
#define atomic_sub(addr, v)		(void)atomic_sub_return((addr), (v))

#define atomic_inc(addr)		atomic_add((addr), 1)
#define atomic_dec(addr)		atomic_add((addr), -1)

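/*
 * Illustrative example (not part of the original header): the derived
 * helpers in action. "nr_events" and the function names are hypothetical.
 *
 *	static long nr_events;
 *
 *	void on_event(void)
 *	{
 *		atomic_inc(&nr_events);
 *	}
 *
 *	long drain_events(void)
 *	{
 *		return xchg(&nr_events, 0);	// read and reset in one step
 *	}
 *
 * atomic_inc()/atomic_dec() discard the result; when the updated value is
 * needed, use atomic_add_return()/atomic_sub_return() instead.
 */
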
#endif /* #ifndef _INCLUDE_API_H */

#endif /* _ARCH_ATOMIC_PPC_H */