#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef __SIZEOF_LONG__
#ifdef __powerpc64__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

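/*
 * Note: lwsync orders loads and stores in all combinations except
 * store->load; plain sync is a full barrier. On processors that lack
 * lwsync (__NO_LWSYNC__, e.g. some embedded cores), we fall back on the
 * stronger sync opcode.
 */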
#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE	"sync\n"
#else
#define LWSYNC_OPCODE	"lwsync\n"
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

#define ILLEGAL_INSTR	".long 0xd00d00"

#define uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
#define uatomic_read(addr)	LOAD_SHARED(*(addr))
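
/*
 * Note (editorial): STORE_SHARED() and LOAD_SHARED() (urcu/system.h) are
 * essentially volatile accesses, so uatomic_set()/uatomic_read() are atomic
 * for naturally aligned word-sized types but imply no memory barrier of
 * their own.
 */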

/*
 * Using an isync as the second barrier for exchange, to provide acquire
 * semantics. According to atomic_ops/sysdeps/gcc/powerpc.h, the
 * documentation is "fairly explicit that this also has acquire semantics."
 * Derived from AO_compare_and_swap(), but with the comparison removed.
 */

/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_xchg(addr, v)						\
	((__typeof__(*(addr))) _uatomic_exchange((addr),		\
						 (unsigned long)(v),	\
						 sizeof(*(addr))))
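
/*
 * Usage sketch (illustrative, not part of this header): atomically detach
 * a whole lock-free list by swapping its head with NULL. "list_head" is a
 * hypothetical global; the returned old head is read with acquire
 * semantics thanks to the trailing isync.
 *
 *	struct node *snapshot = uatomic_xchg(&list_head, NULL);
 */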

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned int)_new),
				  "r"((unsigned int)old)
				: "memory", "cc");

		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned long)_new),
				  "r"((unsigned long)old)
				: "memory", "cc");

		return old_val;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)				\
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr),			\
						(unsigned long)(old),	\
						(unsigned long)(_new),	\
						sizeof(*(addr))))
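
/*
 * Usage sketch (illustrative): the canonical compare-and-swap retry loop,
 * here clamping a hypothetical "counter" at a hypothetical "MAX".
 * uatomic_cmpxchg() returns the value found at the address; the update
 * took place iff that value equals "oldv".
 *
 *	long oldv, newv;
 *
 *	do {
 *		oldv = uatomic_read(&counter);
 *		if (oldv >= MAX)
 *			break;
 *		newv = oldv + 1;
 *	} while (uatomic_cmpxchg(&counter, oldv, newv) != oldv);
 */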

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						   (unsigned long)(v),	\
						   sizeof(*(addr))))
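
/*
 * Usage sketch (illustrative): since uatomic_add_return() returns the
 * *new* value, it can hand out unique slots from a shared index
 * ("next_idx" is a hypothetical global):
 *
 *	unsigned long slot = uatomic_add_return(&next_idx, 1) - 1;
 */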

/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))

#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define uatomic_sub(addr, v)		(void)uatomic_sub_return((addr), (v))

#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define uatomic_dec(addr)		uatomic_add((addr), -1)
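
/*
 * Usage sketch (illustrative): a reference-count release built on the
 * derived operations; the caller that brings a hypothetical "refcount"
 * to zero is the one that must free the object:
 *
 *	if (uatomic_sub_return(&refcount, 1) == 0)
 *		free_object();
 */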

#ifdef __cplusplus
}
#endif

#endif /* _URCU_ARCH_UATOMIC_PPC_H */