#ifndef _URCU_ARCH_ATOMIC_S390_H
#define _URCU_ARCH_ATOMIC_S390_H

/*
 * Atomic exchange operations for the S390 architecture. Based on information
 * taken from the Principles of Operation Appendix A "Conditional Swapping
 * Instructions (CS, CDS)".
 *
 * Copyright (c) 2009 Novell, Inc.
 * Author: Jan Blunck <jblunck@suse.de>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

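/*
 * Every read-modify-write primitive below follows the same pattern, shown
 * here as a rough C sketch (illustrative only; compare_and_swap() is a
 * stand-in for the CS/CSG instruction, not a function defined in this file):
 *
 *      do {
 *              old = *addr;
 *              new = f(old);           e.g. old + val, or the value to store
 *      } while (!compare_and_swap(addr, old, new));
 *
 * CS handles 32-bit operands and CSG 64-bit operands; the condition code set
 * by the instruction drives the retry branch (brc 4,0b).
 */
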
#include <urcu/compiler.h>
#include <urcu/system.h>

#ifndef __SIZEOF_LONG__
#ifdef __s390x__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG   (__SIZEOF_LONG__ * 8)
#endif

#define uatomic_set(addr, v)    STORE_SHARED(*(addr), (v))
#define uatomic_read(addr)      LOAD_SHARED(*(addr))

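/*
 * uatomic_set() and uatomic_read() map to the STORE_SHARED()/LOAD_SHARED()
 * accessors from <urcu/system.h>: a single aligned store or load is already
 * atomic on s390, so no CS instruction is needed. Minimal usage sketch
 * ("counter" is a hypothetical variable, not defined in this header):
 *
 *      static unsigned long counter;
 *      uatomic_set(&counter, 0UL);
 *      unsigned long snapshot = uatomic_read(&counter);
 */
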
/* Atomically exchange *addr for val; returns the previous value. */
static inline __attribute__((always_inline))
unsigned int uatomic_exchange_32(volatile unsigned int *addr, unsigned int val)
{
        unsigned int result;

        __asm__ __volatile__(
                "0: cs %0,%2,%1\n"      /* on mismatch, CS reloads %0 from *addr */
                " brc 4,0b\n"           /* condition code 1: swap failed, retry */
                : "=&r"(result), "=m" (*addr)
                : "r"(val), "m" (*addr)
                : "memory", "cc");

        return result;
}

#if (BITS_PER_LONG == 64)

static inline __attribute__((always_inline))
unsigned long uatomic_exchange_64(volatile unsigned long *addr,
                                  unsigned long val)
{
        unsigned long result;

        __asm__ __volatile__(
                "0: csg %0,%2,%1\n"
                " brc 4,0b\n"
                : "=&r"(result), "=m" (*addr)
                : "r"(val), "m" (*addr)
                : "memory", "cc");

        return result;
}

#endif

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
{
        switch (len) {
        case 4:
                return uatomic_exchange_32(addr, val);
#if (BITS_PER_LONG == 64)
        case 8:
                return uatomic_exchange_64(addr, val);
#endif
        default:
                /* Unsupported operand size: trap on an illegal instruction. */
                __asm__ __volatile__(".long 0xd00d00");
        }

        return 0;
}

#define uatomic_xchg(addr, v)                                           \
        (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
                                                sizeof(*(addr)))

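/*
 * uatomic_xchg() stores the new value and returns the previous contents of
 * the location in one atomic step. Minimal usage sketch ("head" is a
 * hypothetical shared pointer, not defined in this header):
 *
 *      struct node *head;
 *      struct node *old_list = uatomic_xchg(&head, NULL);
 *      old_list now refers to the detached list; head is NULL
 */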

/* Atomically add val to *addr; no value is returned. */
static inline __attribute__((always_inline))
void uatomic_add_32(volatile unsigned int *addr, unsigned int val)
{
        unsigned int result, old;

        __asm__ __volatile__(
                " l %0, %1\n"           /* old = *addr */
                "0: lr %2, %0\n"        /* result = old */
                " ar %2, %3\n"          /* result += val */
                " cs %0,%2,%1\n"        /* publish result; reload old on mismatch */
                " brc 4,0b\n"           /* condition code 1: retry */
                : "=&r"(old), "+m" (*addr),
                  "=&r"(result)
                : "r"(val)
                : "memory", "cc");
}

#if (BITS_PER_LONG == 64)

static inline __attribute__((always_inline))
void uatomic_add_64(volatile unsigned long *addr, unsigned long val)
{
        unsigned long result, old;

        __asm__ __volatile__(
                " lg %0, %1\n"
                "0: lgr %2, %0\n"
                " agr %2, %3\n"
                " csg %0,%2,%1\n"
                " brc 4,0b\n"
                : "=&r"(old), "+m" (*addr),
                  "=&r"(result)
                : "r"(val)
                : "memory", "cc");
}

#endif

static inline __attribute__((always_inline))
void _uatomic_add(void *addr, unsigned long val, int len)
{
        switch (len) {
        case 4:
                uatomic_add_32(addr, val);
                return;
#if (BITS_PER_LONG == 64)
        case 8:
                uatomic_add_64(addr, val);
                return;
#endif
        default:
                /* Unsupported operand size: trap on an illegal instruction. */
                __asm__ __volatile__(".long 0xd00d00");
        }

        return;
}

#define uatomic_add(addr, val)                                          \
        _uatomic_add((addr), (unsigned long)(val), sizeof(*(addr)))

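/*
 * uatomic_add() atomically adds to the location; the previous value is not
 * returned. Minimal usage sketch ("hits" is a hypothetical counter, not
 * defined in this header):
 *
 *      static unsigned long hits;
 *      uatomic_add(&hits, 1UL);
 */
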
/* Compare-and-swap: if *addr == old, store new; returns the value observed. */
static inline __attribute__((always_inline))
unsigned int uatomic_cmpxchg_32(volatile unsigned int *addr, unsigned int old,
                                unsigned int new)
{
        __asm__ __volatile__(
                " cs %0,%2,%1\n"        /* on mismatch, old is updated from *addr */
                : "+r"(old), "+m"(*addr)
                : "r"(new)
                : "memory", "cc");

        return old;
}

#if (BITS_PER_LONG == 64)

static inline __attribute__((always_inline))
unsigned long uatomic_cmpxchg_64(volatile unsigned long *addr,
                                 unsigned long old, unsigned long new)
{
        __asm__ __volatile__(
                " csg %0,%2,%1\n"
                : "+r"(old), "+m"(*addr)
                : "r"(new)
                : "memory", "cc");

        return old;
}

#endif

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                               unsigned long new, int len)
{
        switch (len) {
        case 4:
                return uatomic_cmpxchg_32(addr, old, new);
#if (BITS_PER_LONG == 64)
        case 8:
                return uatomic_cmpxchg_64(addr, old, new);
#endif
        default:
                /* Unsupported operand size: trap on an illegal instruction. */
                __asm__ __volatile__(".long 0xd00d00");
        }

        return 0;
}

#define uatomic_cmpxchg(addr, old, new)                                 \
        (__typeof__(*(addr))) _uatomic_cmpxchg((addr),                  \
                                               (unsigned long)(old),    \
                                               (unsigned long)(new),    \
                                               sizeof(*(addr)))

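/*
 * uatomic_cmpxchg() returns the value that was found in the location; the
 * store took effect only if that value equals the expected "old" argument.
 * Minimal usage sketch ("lock_word" is a hypothetical variable, not defined
 * in this header):
 *
 *      static unsigned int lock_word;
 *      if (uatomic_cmpxchg(&lock_word, 0, 1) == 0) {
 *              critical section
 *              uatomic_set(&lock_word, 0);
 *      }
 */
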
#define URCU_CAS_AVAIL()        1

#endif /* _URCU_ARCH_ATOMIC_S390_H */