#ifndef _ARCH_ATOMIC_PPC_H
#define _ARCH_ATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#ifndef __SIZEOF_LONG__
#ifdef __powerpc64__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

#define ILLEGAL_INSTR	".long 0xd00d00"

#ifndef _INCLUDE_API_H

#define atomic_set(addr, v)				\
do {							\
	ACCESS_ONCE(*(addr)) = (v);			\
} while (0)

#define atomic_read(addr)	ACCESS_ONCE(*(addr))

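/*
 * Illustrative usage sketch (not part of the original header): atomic_set()
 * and atomic_read() are plain volatile store/load wrappers. They keep the
 * compiler from caching the access in a register, but provide no memory
 * barrier ordering by themselves. The variable name "ready" is hypothetical.
 *
 *	static long ready;
 *
 *	atomic_set(&ready, 1);		// writer: single volatile store
 *	while (!atomic_read(&ready))	// reader: re-loads the value on
 *		continue;		// every iteration
 */
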
/*
 * Using an isync as the second barrier for exchange, to provide acquire
 * semantics. According to atomic_ops/sysdeps/gcc/powerpc.h, the documentation
 * is "fairly explicit that this also has acquire semantics."
 * Derived from AO_compare_and_swap(), but with the comparison removed.
 */

/* xchg */

static __attribute__((always_inline))
unsigned long _atomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define xchg(addr, v)	(__typeof__(*(addr))) _atomic_exchange((addr), (v), \
							       sizeof(*(addr)))

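/*
 * Illustrative usage sketch (not part of the original header): xchg()
 * atomically stores a new value and returns the value it replaced; the
 * lwsync/isync pair in _atomic_exchange() orders the swap against
 * surrounding accesses. The names "owner", "self" and do_work() are
 * hypothetical.
 *
 *	static unsigned long owner;	// 0 means the slot is free
 *
 *	unsigned long prev = xchg(&owner, self);
 *	if (prev == 0)
 *		do_work();		// slot was free and is now ours
 */
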
/* cmpxchg */

static __attribute__((always_inline))
unsigned long _atomic_cmpxchg(void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned int)_new),
				  "r"((unsigned int)old)
				: "memory", "cc");

		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned long)_new),
				  "r"((unsigned long)old)
				: "memory", "cc");

		return old_val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define cmpxchg(addr, old, _new)				\
	(__typeof__(*(addr))) _atomic_cmpxchg((addr), (old), (_new), \
					      sizeof(*(addr)))

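/*
 * Illustrative usage sketch (not part of the original header): the classic
 * compare-and-swap retry loop. cmpxchg() returns the value found in memory;
 * the update took effect iff that value equals the "old" argument we passed.
 * The variable name "counter" is hypothetical.
 *
 *	static long counter;
 *	long seen, old;
 *
 *	seen = atomic_read(&counter);
 *	do {
 *		old = seen;
 *		seen = cmpxchg(&counter, old, old * 2);
 *	} while (seen != old);		// retry if another thread raced us
 */
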
/* atomic_add_return */

static __attribute__((always_inline))
unsigned long _atomic_add_return(void *addr, unsigned long val,
				 int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define atomic_add_return(addr, v)	\
	(__typeof__(*(addr))) _atomic_add_return((addr), (v), sizeof(*(addr)))

/* atomic_sub_return, atomic_add, atomic_sub, atomic_inc, atomic_dec */

#define atomic_sub_return(addr, v)	atomic_add_return((addr), -(v))

#define atomic_add(addr, v)	(void)atomic_add_return((addr), (v))
#define atomic_sub(addr, v)	(void)atomic_sub_return((addr), (v))

#define atomic_inc(addr)	atomic_add((addr), 1)
#define atomic_dec(addr)	atomic_add((addr), -1)
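
/*
 * Illustrative usage sketch (not part of the original header): a reference
 * count built from the helpers above. atomic_inc()/atomic_dec() discard the
 * result; atomic_sub_return() is used when the caller must observe the new
 * value. The names "refcount" and release_object() are hypothetical.
 *
 *	static long refcount = 1;
 *
 *	atomic_inc(&refcount);				// take a reference
 *	...
 *	if (atomic_sub_return(&refcount, 1) == 0)	// drop a reference
 *		release_object();			// last user cleans up
 */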

#endif /* #ifndef _INCLUDE_API_H */

#endif /* _ARCH_ATOMIC_PPC_H */