bb74934fb19a279dfe43d7f1785ea89865756193
[urcu.git] / urcu / uatomic_arch_ppc.h
1 #ifndef _URCU_ARCH_UATOMIC_PPC_H
2 #define _URCU_ARCH_UATOMIC_PPC_H
3
4 /*
5 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
6 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
7 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
8 * Copyright (c) 2009 Mathieu Desnoyers
9 *
10 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
11 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
12 *
13 * Permission is hereby granted to use or copy this program
14 * for any purpose, provided the above notices are retained on all copies.
15 * Permission to modify the code and to distribute modified code is granted,
16 * provided the above notices are retained, and a notice that the code was
17 * modified is included with the above copyright notice.
18 *
19 * Code inspired from libuatomic_ops-1.2, inherited in part from the
20 * Boehm-Demers-Weiser conservative garbage collector.
21 */
22
23 #include <urcu/compiler.h>
24 #include <urcu/system.h>
25
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29
30 #ifdef __NO_LWSYNC__
31 #define LWSYNC_OPCODE "sync\n"
32 #else
33 #define LWSYNC_OPCODE "lwsync\n"
34 #endif
35
36 #define ILLEGAL_INSTR ".long 0xd00d00"
37
/*
 * An isync is used as the second barrier of each exchange to provide
 * acquire semantics. According to atomic_ops/sysdeps/gcc/powerpc.h, the
 * documentation is "fairly explicit that this also has acquire semantics."
 * Derived from AO_compare_and_swap(), but with the comparison removed.
 */
44
45 /* xchg */
46
47 static inline __attribute__((always_inline))
48 unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
49 {
50 switch (len) {
51 case 4:
52 {
53 unsigned int result;
54
55 __asm__ __volatile__(
56 LWSYNC_OPCODE
57 "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
58 "stwcx. %2,0,%1\n" /* else store conditional */
59 "bne- 1b\n" /* retry if lost reservation */
60 "isync\n"
61 : "=&r"(result)
62 : "r"(addr), "r"(val)
63 : "memory", "cc");
64
65 return result;
66 }
67 #if (CAA_BITS_PER_LONG == 64)
68 case 8:
69 {
70 unsigned long result;
71
72 __asm__ __volatile__(
73 LWSYNC_OPCODE
74 "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
75 "stdcx. %2,0,%1\n" /* else store conditional */
76 "bne- 1b\n" /* retry if lost reservation */
77 "isync\n"
78 : "=&r"(result)
79 : "r"(addr), "r"(val)
80 : "memory", "cc");
81
82 return result;
83 }
84 #endif
85 }
86 /* generate an illegal instruction. Cannot catch this with linker tricks
87 * when optimizations are disabled. */
88 __asm__ __volatile__(ILLEGAL_INSTR);
89 return 0;
90 }
91
92 #define uatomic_xchg(addr, v) \
93 ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
94 sizeof(*(addr))))
95 /* cmpxchg */
96
97 static inline __attribute__((always_inline))
98 unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
99 unsigned long _new, int len)
100 {
101 switch (len) {
102 case 4:
103 {
104 unsigned int old_val;
105
106 __asm__ __volatile__(
107 LWSYNC_OPCODE
108 "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
109 "cmpw %0,%3\n" /* if load is not equal to */
110 "bne 2f\n" /* old, fail */
111 "stwcx. %2,0,%1\n" /* else store conditional */
112 "bne- 1b\n" /* retry if lost reservation */
113 "isync\n"
114 "2:\n"
115 : "=&r"(old_val)
116 : "r"(addr), "r"((unsigned int)_new),
117 "r"((unsigned int)old)
118 : "memory", "cc");
119
120 return old_val;
121 }
122 #if (CAA_BITS_PER_LONG == 64)
123 case 8:
124 {
125 unsigned long old_val;
126
127 __asm__ __volatile__(
128 LWSYNC_OPCODE
129 "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
130 "cmpd %0,%3\n" /* if load is not equal to */
131 "bne 2f\n" /* old, fail */
132 "stdcx. %2,0,%1\n" /* else store conditional */
133 "bne- 1b\n" /* retry if lost reservation */
134 "isync\n"
135 "2:\n"
136 : "=&r"(old_val)
137 : "r"(addr), "r"((unsigned long)_new),
138 "r"((unsigned long)old)
139 : "memory", "cc");
140
141 return old_val;
142 }
143 #endif
144 }
145 /* generate an illegal instruction. Cannot catch this with linker tricks
146 * when optimizations are disabled. */
147 __asm__ __volatile__(ILLEGAL_INSTR);
148 return 0;
149 }
150
151
152 #define uatomic_cmpxchg(addr, old, _new) \
153 ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
154 (unsigned long)(_new), \
155 sizeof(*(addr))))
156
157 /* uatomic_add_return */
158
159 static inline __attribute__((always_inline))
160 unsigned long _uatomic_add_return(void *addr, unsigned long val,
161 int len)
162 {
163 switch (len) {
164 case 4:
165 {
166 unsigned int result;
167
168 __asm__ __volatile__(
169 LWSYNC_OPCODE
170 "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
171 "add %0,%2,%0\n" /* add val to value loaded */
172 "stwcx. %0,0,%1\n" /* store conditional */
173 "bne- 1b\n" /* retry if lost reservation */
174 "isync\n"
175 : "=&r"(result)
176 : "r"(addr), "r"(val)
177 : "memory", "cc");
178
179 return result;
180 }
181 #if (CAA_BITS_PER_LONG == 64)
182 case 8:
183 {
184 unsigned long result;
185
186 __asm__ __volatile__(
187 LWSYNC_OPCODE
188 "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
189 "add %0,%2,%0\n" /* add val to value loaded */
190 "stdcx. %0,0,%1\n" /* store conditional */
191 "bne- 1b\n" /* retry if lost reservation */
192 "isync\n"
193 : "=&r"(result)
194 : "r"(addr), "r"(val)
195 : "memory", "cc");
196
197 return result;
198 }
199 #endif
200 }
201 /* generate an illegal instruction. Cannot catch this with linker tricks
202 * when optimizations are disabled. */
203 __asm__ __volatile__(ILLEGAL_INSTR);
204 return 0;
205 }
206
207
208 #define uatomic_add_return(addr, v) \
209 ((__typeof__(*(addr))) _uatomic_add_return((addr), \
210 (unsigned long)(v), \
211 sizeof(*(addr))))
212
213 #ifdef __cplusplus
214 }
215 #endif
216
217 #include <urcu/uatomic_generic.h>
218
219 #endif /* _URCU_ARCH_UATOMIC_PPC_H */
This page took 0.032344 seconds and 3 git commands to generate.