[urcu.git] urcu/uatomic_arch_s390.h
#ifndef _URCU_UATOMIC_ARCH_S390_H
#define _URCU_UATOMIC_ARCH_S390_H

/*
 * Atomic exchange operations for the S390 architecture. Based on information
 * taken from the Principles of Operation Appendix A "Conditional Swapping
 * Instructions (CS, CDS)".
 *
 * Copyright (c) 2009 Novell, Inc.
 * Author: Jan Blunck <jblunck@suse.de>
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef __SIZEOF_LONG__
#ifdef __s390x__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#endif

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define COMPILER_HAVE_SHORT_MEM_OPERAND
#endif
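
/*
 * Note: gcc releases newer than 3.2 are assumed here to understand the s390
 * "Q" inline assembly constraint (a memory reference with a short
 * displacement and no index register), which lets CS/CSG address the memory
 * operand directly. Older compilers get the fallback below, which passes an
 * address register and uses an explicit "0(reg)" base/displacement form.
 */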

/*
 * MEMOP assembler operand rules:
 * - "op" refers to the MEMOP_IN operand.
 * - MEMOP_IN can expand to more than a single operand. Use it only at the
 *   end of the operand list.
 * An expansion example is given below the two sets of definitions.
 */

#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND

#define MEMOP_OUT(addr) "=Q" (*(addr))
#define MEMOP_IN(addr)  "Q" (*(addr))
#define MEMOP_REF(op)   #op             /* op refers to the MEMOP_IN operand */

#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */

#define MEMOP_OUT(addr) "=m" (*(addr))
#define MEMOP_IN(addr)  "a" (addr), "m" (*(addr))
#define MEMOP_REF(op)   "0(" #op ")"    /* op refers to the MEMOP_IN operand */

#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
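
/*
 * Expansion example (illustrative): a template such as
 *         "cs %0,%2," MEMOP_REF(%3)
 * with operands "=&r" (old), MEMOP_OUT (__hp(addr)) : "r" (val),
 * MEMOP_IN (__hp(addr)) expands roughly as follows:
 * - with "Q" constraints:  cs %0,%2,%3      (%3 is the memory operand itself)
 * - with the fallback:     cs %0,%2,0(%3)   (%3 is an address register, and
 *                          "m" (*(addr)) is passed as an extra operand so the
 *                          compiler knows the memory location is accessed)
 */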

struct __uatomic_dummy {
        unsigned long v[10];
};
#define __hp(x) ((struct __uatomic_dummy *)(x))
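
/*
 * addr is passed around as a void pointer, so __hp() above casts it to a
 * dereferenceable dummy structure type for use in the MEMOP_OUT()/MEMOP_IN()
 * memory constraints. The structure is made large so the memory operand
 * covers the bytes accessed by CS or CSG regardless of the operand size
 * selected at the call site.
 */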

/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
{
        switch (len) {
        case 4:
        {
                unsigned int old_val;

                /*
                 * Compare-and-swap retry loop: CS compares old_val with the
                 * word at addr. On mismatch it loads the current memory value
                 * into old_val, sets condition code 1, and "brc 4,0b"
                 * branches back to retry; on match the word is replaced by
                 * val and the loop exits. old_val needs no initial value:
                 * the first failing compare simply fetches the current value.
                 */
                __asm__ __volatile__(
                        "0:     cs %0,%2," MEMOP_REF(%3) "\n"
                        "       brc 4,0b\n"
                        : "=&r" (old_val), MEMOP_OUT (__hp(addr))
                        : "r" (val), MEMOP_IN (__hp(addr))
                        : "memory", "cc");
                return old_val;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long old_val;

                /* Same retry loop as above, using CSG for 64-bit operands. */
                __asm__ __volatile__(
                        "0:     csg %0,%2," MEMOP_REF(%3) "\n"
                        "       brc 4,0b\n"
                        : "=&r" (old_val), MEMOP_OUT (__hp(addr))
                        : "r" (val), MEMOP_IN (__hp(addr))
                        : "memory", "cc");
                return old_val;
        }
#endif
        default:
                /* Unsupported size: emit an invalid opcode to trap at runtime. */
                __asm__ __volatile__(".long 0xd00d00");
        }

        return 0;
}

#define uatomic_xchg(addr, v)                                               \
        (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
                                                sizeof(*(addr)))
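
/*
 * Usage sketch (illustrative only, the variables are hypothetical):
 *
 *     unsigned long tail = 0;
 *     unsigned long previous = uatomic_xchg(&tail, 1UL);
 *
 * The new value is stored atomically and the value previously found in
 * memory is returned.
 */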

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                               unsigned long _new, int len)
{
        switch (len) {
        case 4:
        {
                unsigned int old_val = (unsigned int)old;

                /*
                 * Single CS: if the word at addr equals old_val, _new is
                 * stored; otherwise old_val receives the value found in
                 * memory. Either way the caller gets the previous content.
                 */
                __asm__ __volatile__(
                        "       cs %0,%2," MEMOP_REF(%3) "\n"
                        : "+r" (old_val), MEMOP_OUT (__hp(addr))
                        : "r" (_new), MEMOP_IN (__hp(addr))
                        : "memory", "cc");
                return old_val;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                /* Same as above, using CSG for 64-bit operands. */
                __asm__ __volatile__(
                        "       csg %0,%2," MEMOP_REF(%3) "\n"
                        : "+r" (old), MEMOP_OUT (__hp(addr))
                        : "r" (_new), MEMOP_IN (__hp(addr))
                        : "memory", "cc");
                return old;
        }
#endif
        default:
                /* Unsupported size: emit an invalid opcode to trap at runtime. */
                __asm__ __volatile__(".long 0xd00d00");
        }

        return 0;
}

#define uatomic_cmpxchg(addr, old, _new)                                \
        (__typeof__(*(addr))) _uatomic_cmpxchg((addr),                  \
                                               (unsigned long)(old),    \
                                               (unsigned long)(_new),   \
                                               sizeof(*(addr)))
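
/*
 * Usage sketch (illustrative only, the variables are hypothetical):
 *
 *     unsigned long lock = 0;
 *     unsigned long seen = uatomic_cmpxchg(&lock, 0UL, 1UL);
 *
 * If "seen" is 0, the caller observed 0 and 1 was stored atomically;
 * otherwise nothing was written and "seen" holds the current value.
 */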

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
        switch (len) {
        case 4:
        {
                unsigned int old, oldt;

                /*
                 * Classic cmpxchg loop: reread the current value until the
                 * compare-and-swap of old -> old + val succeeds.
                 */
                oldt = uatomic_read((unsigned int *)addr);
                do {
                        old = oldt;
                        oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
                } while (oldt != old);

                return old + val;
        }
#if (BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long old, oldt;

                oldt = uatomic_read((unsigned long *)addr);
                do {
                        old = oldt;
                        oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
                } while (oldt != old);

                return old + val;
        }
#endif
        }
        /* Unsupported size. */
        __builtin_trap();
        return 0;
}

#define uatomic_add_return(addr, v)                                     \
        ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
                                                   (unsigned long)(v),  \
                                                   sizeof(*(addr))))
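
/*
 * Usage sketch (illustrative only, the counter is hypothetical):
 *
 *     unsigned long refcount = 1;
 *     unsigned long newval = uatomic_add_return(&refcount, 1);
 *
 * The addition is performed atomically (via the cmpxchg loop above) and the
 * updated value is returned.
 */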

#ifdef __cplusplus
}
#endif

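/*
 * The architecture-independent header below is assumed to provide the
 * remaining uatomic operations (e.g. uatomic_add, uatomic_set) built on top
 * of the cmpxchg/xchg primitives defined here.
 */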
#include <urcu/uatomic_generic.h>

#endif /* _URCU_UATOMIC_ARCH_S390_H */