#ifndef _URCU_UATOMIC_ARCH_S390_H
#define _URCU_UATOMIC_ARCH_S390_H

/*
 * Atomic exchange operations for the S390 architecture. Based on information
 * taken from the Principles of Operation Appendix A "Conditional Swapping
 * Instructions (CS, CDS)".
 *
 * Copyright (c) 2009 Novell, Inc.
 * Author: Jan Blunck <jblunck@suse.de>
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifndef __SIZEOF_LONG__
#ifdef __s390x__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

/* Short memory operands (the "Q" constraint used below) need gcc > 3.2. */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define COMPILER_HAVE_SHORT_MEM_OPERAND
#endif

/*
 * MEMOP assembler operand rules:
 * - op refers to the MEMOP_IN operand
 * - MEMOP_IN can expand to more than a single operand. Use it only at the
 *   end of the operand list.
 */

#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND

#define MEMOP_OUT(addr)	"=Q" (*(addr))
#define MEMOP_IN(addr)	"Q" (*(addr))
#define MEMOP_REF(op)	#op		/* op refers to MEMOP_IN operand */

#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */

#define MEMOP_OUT(addr)	"=m" (*(addr))
#define MEMOP_IN(addr)	"a" (addr), "m" (*(addr))
#define MEMOP_REF(op)	"0(" #op ")"	/* op refers to MEMOP_IN operand */

#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
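
/*
 * Expansion sketch (for reference): with the "Q" path above,
 * "0: cs %0,%2," MEMOP_REF(%3) becomes "0: cs %0,%2,%3", where %3 names the
 * memory operand directly; with the fallback path it becomes
 * "0: cs %0,%2,0(%3)", where %3 is the address register supplied by the "a"
 * constraint and the extra "m" operand keeps the compiler aware that the
 * location is accessed.
 */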

/*
 * __uatomic_dummy exists only to give the asm memory operands an lvalue to
 * refer to: addr arrives as a void pointer, and the generously sized array
 * makes the "Q"/"m" constraints cover the whole accessed location.
 */
struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))

#define uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
#define uatomic_read(addr)	LOAD_SHARED(*(addr))

/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		/*
		 * cs compares old_val with the word at addr: on mismatch the
		 * current contents are loaded into old_val and condition
		 * code 1 makes brc 4 retry; on match val is stored, which
		 * completes the exchange.
		 */
		__asm__ __volatile__(
			"0:	cs %0,%2," MEMOP_REF(%3) "\n"
			"	brc 4,0b\n"
			: "=&r" (old_val), MEMOP_OUT (__hp(addr))
			: "r" (val), MEMOP_IN (__hp(addr))
			: "memory", "cc");
		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			"0:	csg %0,%2," MEMOP_REF(%3) "\n"
			"	brc 4,0b\n"
			: "=&r" (old_val), MEMOP_OUT (__hp(addr))
			: "r" (val), MEMOP_IN (__hp(addr))
			: "memory", "cc");
		return old_val;
	}
#endif
	default:
		/* unexpected operand size */
		__asm__ __volatile__(".long 0xd00d00");
	}

	return 0;
}

#define uatomic_xchg(addr, v)						    \
	(__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr)))
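
/*
 * Usage sketch (illustrative only; the names below are invented for the
 * example): atomically publish a new value and obtain the one it replaces.
 *
 *	static unsigned long current_state;
 *
 *	unsigned long set_state(unsigned long new_state)
 *	{
 *		return uatomic_xchg(&current_state, new_state);
 *	}
 */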

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val = (unsigned int)old;

		/*
		 * A single cs suffices: if the word at addr equals old_val,
		 * _new is stored; otherwise old_val is loaded with the value
		 * actually found. Either way old_val ends up holding the
		 * previous contents, which is what the caller gets back.
		 */
		__asm__ __volatile__(
			"	cs %0,%2," MEMOP_REF(%3) "\n"
			: "+r" (old_val), MEMOP_OUT (__hp(addr))
			: "r" (_new), MEMOP_IN (__hp(addr))
			: "memory", "cc");
		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
			"	csg %0,%2," MEMOP_REF(%3) "\n"
			: "+r" (old), MEMOP_OUT (__hp(addr))
			: "r" (_new), MEMOP_IN (__hp(addr))
			: "memory", "cc");
		return old;
	}
#endif
	default:
		__asm__ __volatile__(".long 0xd00d00");
	}

	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)				\
	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),			\
					       (unsigned long)(old),	\
					       (unsigned long)(_new),	\
					       sizeof(*(addr)))
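
/*
 * Usage sketch (illustrative only; names invented for the example): the
 * usual compare-and-swap retry loop, here incrementing a counter only while
 * it stays below a caller-supplied bound.
 *
 *	static unsigned long count;
 *
 *	int count_inc_if_below(unsigned long max)
 *	{
 *		unsigned long old, newval;
 *
 *		do {
 *			old = uatomic_read(&count);
 *			if (old >= max)
 *				return 0;
 *			newval = old + 1;
 *		} while (uatomic_cmpxchg(&count, old, newval) != old);
 *		return 1;
 *	}
 */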

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old, oldt;

		/*
		 * Retry loop built on _uatomic_cmpxchg: restart whenever
		 * another update slipped in between the read and the swap.
		 */
		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
		} while (oldt != old);

		return old + val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
		} while (oldt != old);

		return old + val;
	}
#endif
	}
	__builtin_trap();
	return 0;
}

#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						   (unsigned long)(v),	\
						   sizeof(*(addr))))
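
/*
 * Usage sketch (illustrative only; struct and function names invented for
 * the example): reference counting on top of the returning add, so the
 * caller learns atomically whether it dropped the last reference.
 *
 *	struct object {
 *		long refcount;
 *	};
 *
 *	void object_get(struct object *obj)
 *	{
 *		uatomic_add_return(&obj->refcount, 1);
 *	}
 *
 *	int object_put(struct object *obj)
 *	{
 *		return uatomic_add_return(&obj->refcount, -1) == 0;
 *	}
 */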

/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))

#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define uatomic_sub(addr, v)		(void)uatomic_sub_return((addr), (v))

#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define uatomic_dec(addr)		uatomic_add((addr), -1)

#define compat_uatomic_cmpxchg(ptr, old, _new)	uatomic_cmpxchg(ptr, old, _new)

#endif /* _URCU_UATOMIC_ARCH_S390_H */