#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009      Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE	"sync\n"
#else
#define LWSYNC_OPCODE	"lwsync\n"
#endif

#define ILLEGAL_INSTR	".long 0xd00d00"

/*
 * Using an isync as the second barrier for exchange to provide acquire
 * semantics. According to uatomic_ops/sysdeps/gcc/powerpc.h, the
 * documentation is "fairly explicit that this also has acquire semantics."
 * Derived from AO_compare_and_swap(), but with the comparison removed.
 */

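/*
 * Summary of the pattern shared by every primitive below (a sketch of the
 * inline assembly that follows, not an additional primitive):
 *
 *	lwsync (or sync)		barrier before the atomic update
 *   1:	l[wd]arx  %ret, 0, addr		load and reserve
 *	...				compute/compare the new value
 *	st[wd]cx. %new, 0, addr		store conditional
 *	bne-      1b			retry if the reservation was lost
 *	isync				acquire barrier after the update
 */
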
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))
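
/*
 * Usage sketch (illustrative only; "head" and "new_head" are hypothetical
 * application variables, not part of this API):
 *
 *	static struct node *head;
 *	...
 *	struct node *old_head = uatomic_xchg(&head, new_head);
 *
 * The macro returns the previous content of *addr, cast back to the
 * pointed-to type.  The lwsync/isync pair around the lwarx/stwcx. loop
 * gives the exchange release and acquire semantics.
 */
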
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned int)_new),
				  "r"((unsigned int)old)
				: "memory", "cc");

		return old_val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned long)_new),
				  "r"((unsigned long)old)
				: "memory", "cc");

		return old_val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	      \
						sizeof(*(addr))))
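
/*
 * Usage sketch (illustrative only; "counter" is a hypothetical variable,
 * and uatomic_read() is assumed to be provided by the generic header
 * included at the end of this file):
 *
 *	static unsigned long counter;
 *	...
 *	unsigned long old, ret;
 *
 *	do {
 *		old = uatomic_read(&counter);
 *		ret = uatomic_cmpxchg(&counter, old, old + 1);
 *	} while (ret != old);
 *
 * The macro returns the value that was in *addr before the operation:
 * equal to "old" on success, or the conflicting value on failure.
 */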

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_add_return(addr, v)					      \
	((__typeof__(*(addr))) _uatomic_add_return((addr),		      \
						   (unsigned long)(v),	      \
						   sizeof(*(addr))))
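
/*
 * Usage sketch (illustrative only; "refcount" and release_resource() are
 * hypothetical):
 *
 *	static long refcount;
 *	...
 *	if (uatomic_add_return(&refcount, -1) == 0)
 *		release_resource();
 *
 * Unlike uatomic_xchg() and uatomic_cmpxchg(), the value returned is the
 * new value stored at the address, i.e. the result of the addition.
 */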

#ifdef __cplusplus
}
#endif

#include <urcu/uatomic_generic.h>

#endif /* _URCU_ARCH_UATOMIC_PPC_H */