urcu/uatomic/ppc.h
#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#define ILLEGAL_INSTR   ".long  0xd00d00"

/*
 * Providing sequential consistency semantic with respect to other
 * instructions for cmpxchg and add_return family of atomic primitives.
 *
 * This is achieved with:
 *   lwsync (prior stores can be reordered after following loads)
 *   lwarx
 *   stwcx.
 *   test if success (retry)
 *   sync
 *
 * Explanation of the sequential consistency provided by this scheme
 * from Paul E. McKenney:
 *
 * The reason we can get away with the lwsync before is that if a prior
 * store reorders with the lwarx, then you have to store to the atomic
 * variable from some other CPU to detect it.
 *
 * And if you do that, the lwarx will lose its reservation, so the stwcx
 * will fail. The atomic operation will retry, so that the caller won't be
 * able to see the misordering.
 */
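
/*
 * A minimal usage sketch (hypothetical names, for illustration only):
 * since every primitive below issues lwsync before the lwarx/stwcx. loop
 * and sync once the store conditional succeeds, a successful atomic
 * operation acts as a full memory barrier from the caller's point of view:
 *
 *      static int hypothetical_payload;
 *      static unsigned long hypothetical_ready;
 *
 *      static void hypothetical_publish(void)
 *      {
 *              hypothetical_payload = 42;
 *              (void) uatomic_add_return(&hypothetical_ready, 1);
 *      }
 *
 * The store to hypothetical_payload cannot be observed after the atomic
 * increment, and the increment cannot be observed after any access the
 * caller performs once uatomic_add_return() returns.
 */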

/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
        switch (len) {
        case 4:
        {
                unsigned int result;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "stwcx. %2,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                                : "=&r"(result)
                                : "r"(addr), "r"(val)
                                : "memory", "cc");

                return result;
        }
#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "stdcx. %2,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                                : "=&r"(result)
                                : "r"(addr), "r"(val)
                                : "memory", "cc");

                return result;
        }
#endif
        }
        /*
         * generate an illegal instruction. Cannot catch this with
         * linker tricks when optimizations are disabled.
         */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
}

#define uatomic_xchg(addr, v)                                               \
        ((__typeof__(*(addr))) _uatomic_exchange((addr),                    \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
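
/*
 * Usage sketch for uatomic_xchg() (hypothetical names, for illustration
 * only): atomically replace a shared pointer and obtain the previous
 * value, fully ordered around the swap:
 *
 *      static struct hypothetical_node *hypothetical_head;
 *
 *      static struct hypothetical_node *hypothetical_steal_list(void)
 *      {
 *              return uatomic_xchg(&hypothetical_head, NULL);
 *      }
 */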

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                               unsigned long _new, int len)
{
        switch (len) {
        case 4:
        {
                unsigned int old_val;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "cmpw %0,%3\n"          /* if load is not equal to */
                        "bne 2f\n"              /* old, fail */
                        "stwcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                "2:\n"
                                : "=&r"(old_val)
                                : "r"(addr), "r"((unsigned int)_new),
                                  "r"((unsigned int)old)
                                : "memory", "cc");

                return old_val;
        }
#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long old_val;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "cmpd %0,%3\n"          /* if load is not equal to */
                        "bne 2f\n"              /* old, fail */
                        "stdcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                "2:\n"
                                : "=&r"(old_val)
                                : "r"(addr), "r"((unsigned long)_new),
                                  "r"((unsigned long)old)
                                : "memory", "cc");

                return old_val;
        }
#endif
        }
        /*
         * generate an illegal instruction. Cannot catch this with
         * linker tricks when optimizations are disabled.
         */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
}


#define uatomic_cmpxchg(addr, old, _new)                                      \
        ((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                       \
                                                caa_cast_long_keep_sign(old), \
                                                caa_cast_long_keep_sign(_new),\
                                                sizeof(*(addr))))
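
/*
 * Usage sketch for uatomic_cmpxchg() (hypothetical names, for illustration
 * only): the classic compare-and-swap retry loop. uatomic_cmpxchg()
 * returns the value found in memory, so equality with "old" means the
 * update took place:
 *
 *      static unsigned long hypothetical_counter;
 *
 *      static void hypothetical_add_if_nonzero(unsigned long v)
 *      {
 *              unsigned long old, ret;
 *
 *              do {
 *                      old = CMM_LOAD_SHARED(hypothetical_counter);
 *                      if (!old)
 *                              return;
 *                      ret = uatomic_cmpxchg(&hypothetical_counter,
 *                                            old, old + v);
 *              } while (ret != old);
 *      }
 */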

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
                                  int len)
{
        switch (len) {
        case 4:
        {
                unsigned int result;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "add %0,%2,%0\n"        /* add val to value loaded */
                        "stwcx. %0,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                                : "=&r"(result)
                                : "r"(addr), "r"(val)
                                : "memory", "cc");

                return result;
        }
#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;

                __asm__ __volatile__(
                        LWSYNC_OPCODE
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "add %0,%2,%0\n"        /* add val to value loaded */
                        "stdcx. %0,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
                        "sync\n"
                                : "=&r"(result)
                                : "r"(addr), "r"(val)
                                : "memory", "cc");

                return result;
        }
#endif
        }
        /*
         * generate an illegal instruction. Cannot catch this with
         * linker tricks when optimizations are disabled.
         */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
}


#define uatomic_add_return(addr, v)                                         \
        ((__typeof__(*(addr))) _uatomic_add_return((addr),                  \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
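
/*
 * Usage sketch for uatomic_add_return() (hypothetical names, for
 * illustration only): the return value is the result after the addition,
 * so it can drive reference-count style decisions directly:
 *
 *      static void hypothetical_release(void *obj);
 *      static long hypothetical_refcount;
 *
 *      static void hypothetical_put(void *obj)
 *      {
 *              if (uatomic_add_return(&hypothetical_refcount, -1) == 0)
 *                      hypothetical_release(obj);
 *      }
 */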

#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_PPC_H */