#ifndef _ARCH_ATOMIC_PPC_H
#define _ARCH_ATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#ifndef __SIZEOF_LONG__
#ifdef __powerpc64__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#endif

#define ILLEGAL_INSTR ".long 0xd00d00"

#ifndef _INCLUDE_API_H

/*
 * Using an isync as second barrier for exchange to provide acquire semantics.
 * According to atomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
 * explicit that this also has acquire semantics."
 * Derived from AO_compare_and_swap(), but with the comparison removed.
 */

/* xchg */

static __attribute__((always_inline))
unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
{
        switch (len) {
        case 4:
        {
                unsigned int result;

                __asm__ __volatile__(
                        "lwsync\n"
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "stwcx. %2,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "isync\n"
                        : "=&r"(result)
                        : "r"(addr), "r"(val)
                        : "memory", "cc");

                return result;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;

                __asm__ __volatile__(
                        "lwsync\n"
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "stdcx. %2,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "isync\n"
                        : "=&r"(result)
                        : "r"(addr), "r"(val)
                        : "memory", "cc");

                return result;
        }
#endif
        }
        /* Generate an illegal instruction. Cannot catch this with linker
         * tricks when optimizations are disabled. */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
}

#define xchg(addr, v)   (__typeof__(*(addr))) _atomic_exchange((addr), (v), \
                                                sizeof(*(addr)))

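/*
 * Usage sketch (illustrative only, not part of the original header; the
 * variable name "ready" is hypothetical): atomically publish a new value
 * and obtain the previous one, with acquire semantics.
 *
 *      static int ready;
 *
 *      int was_ready = xchg(&ready, 1);        // previous value of ready
 */
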
/* cmpxchg */

static __attribute__((always_inline))
unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
                              unsigned long _new, int len)
{
        switch (len) {
        case 4:
        {
                unsigned int old_val;

                __asm__ __volatile__(
                        "lwsync\n"
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "cmpw %0,%3\n"          /* if load is not equal to */
                        "bne 2f\n"              /* old, fail */
                        "stwcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "isync\n"
                "2:\n"
                        : "=&r"(old_val)
                        : "r"(addr), "r"((unsigned int)_new),
                          "r"((unsigned int)old)
                        : "memory", "cc");

                return old_val;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long old_val;

                __asm__ __volatile__(
                        "lwsync\n"
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "cmpd %0,%3\n"          /* if load is not equal to */
                        "bne 2f\n"              /* old, fail */
                        "stdcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "isync\n"
                "2:\n"
                        : "=&r"(old_val)
                        : "r"(addr), "r"((unsigned long)_new),
                          "r"((unsigned long)old)
                        : "memory", "cc");

                return old_val;
        }
#endif
        }
        /* Generate an illegal instruction. Cannot catch this with linker
         * tricks when optimizations are disabled. */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
}

#define cmpxchg(addr, old, _new) \
        (__typeof__(*(addr))) _atomic_cmpxchg((addr), (old), (_new), \
                                              sizeof(*(addr)))

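/*
 * Usage sketch (illustrative only, not part of the original header; the
 * variable name "counter" is hypothetical): the classic compare-and-swap
 * retry loop. cmpxchg() returns the value found in memory; the store
 * succeeded iff that value equals the expected old value.
 *
 *      static unsigned int counter;
 *
 *      unsigned int expected, found;
 *      do {
 *              expected = counter;
 *              found = cmpxchg(&counter, expected, expected + 1);
 *      } while (found != expected);
 */
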
/* atomic_add_return */

static __attribute__((always_inline))
unsigned long _atomic_add_return(volatile void *addr, unsigned long val,
                                 int len)
{
        switch (len) {
        case 4:
        {
                unsigned int result;

                __asm__ __volatile__(
                        "lwsync\n"
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "add %0,%2,%0\n"        /* add val to value loaded */
                        "stwcx. %0,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "isync\n"
                        : "=&r"(result)
                        : "r"(addr), "r"(val)
                        : "memory", "cc");

                return result;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;

                __asm__ __volatile__(
                        "lwsync\n"
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "add %0,%2,%0\n"        /* add val to value loaded */
                        "stdcx. %0,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "isync\n"
                        : "=&r"(result)
                        : "r"(addr), "r"(val)
                        : "memory", "cc");

                return result;
        }
#endif
        }
        /* Generate an illegal instruction. Cannot catch this with linker
         * tricks when optimizations are disabled. */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
}

#define atomic_add_return(addr, v) \
        (__typeof__(*(addr))) _atomic_add_return((addr), (v), sizeof(*(addr)))

/* atomic_sub_return, atomic_add, atomic_sub, atomic_inc, atomic_dec */

#define atomic_sub_return(addr, v) atomic_add_return((addr), -(v))

#define atomic_add(addr, v) (void)atomic_add_return((addr), (v))
#define atomic_sub(addr, v) (void)atomic_sub_return((addr), (v))

#define atomic_inc(addr) atomic_add((addr), 1)
#define atomic_dec(addr) atomic_add((addr), -1)
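
/*
 * Usage sketch (illustrative only, not part of the original header; the
 * struct and function names are hypothetical): a simple reference count
 * built on the primitives above.
 *
 *      struct obj {
 *              int refcount;
 *      };
 *
 *      static void obj_get(struct obj *o)
 *      {
 *              atomic_inc(&o->refcount);
 *      }
 *
 *      // Returns nonzero when the last reference was dropped.
 *      static int obj_put(struct obj *o)
 *      {
 *              return atomic_sub_return(&o->refcount, 1) == 0;
 *      }
 */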

#endif /* #ifndef _INCLUDE_API_H */

#endif /* _ARCH_ATOMIC_PPC_H */