s390 uatomic: short memory operand support for new compilers
[urcu.git] / urcu / uatomic_arch_s390.h
#ifndef _URCU_UATOMIC_ARCH_S390_H
#define _URCU_UATOMIC_ARCH_S390_H

/*
 * Atomic exchange operations for the S390 architecture. Based on information
 * taken from the Principles of Operation Appendix A "Conditional Swapping
 * Instructions (CS, CDS)".
 *
 * Copyright (c) 2009 Novell, Inc.
 * Author: Jan Blunck <jblunck@suse.de>
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifndef __SIZEOF_LONG__
#ifdef __s390x__
#define __SIZEOF_LONG__	8
#else
#define __SIZEOF_LONG__	4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

/* Short ("Q" constraint) memory operands require GCC newer than 3.2. */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define COMPILER_HAVE_SHORT_MEM_OPERAND
#endif

/*
 * MEMOP assembler operand rules:
 * - op refers to the MEMOP_IN operand
 * - MEMOP_IN can expand to more than a single operand. Use it only at the
 *   end of the operand list.
 */

#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND

#define MEMOP_OUT(addr)	"=Q" (*(addr))
#define MEMOP_IN(addr)	"Q" (*(addr))
#define MEMOP_REF(op)	#op		/* op refers to the MEMOP_IN operand */

#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */

#define MEMOP_OUT(addr)	"=m" (*(addr))
#define MEMOP_IN(addr)	"a" (addr), "m" (*(addr))
#define MEMOP_REF(op)	"0(" #op ")"	/* op refers to the MEMOP_IN operand */

#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */

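/*
 * Example expansion (illustrative): with short memory operands,
 * "0:	cs %0,%2," MEMOP_REF(%3) "\n" expands to the string
 * "0:	cs %0,%2,%3\n", where %3 is a "Q" operand naming the memory
 * location directly. Without them, it expands to "0:	cs %0,%2,0(%3)\n",
 * where %3 is the address held in a register ("a" constraint) and an
 * extra "m" operand tells the compiler that the pointed-to memory is
 * accessed.
 */
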
/*
 * uatomic_set/uatomic_read map to the shared-memory store/load helpers
 * from urcu/system.h.
 */
#define uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
#define uatomic_read(addr)	LOAD_SHARED(*(addr))

/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		/*
		 * The casts give the memory operands a complete type
		 * (void * cannot be dereferenced). cs compares old_val
		 * with *addr: on mismatch it loads the current value
		 * into old_val, and brc 4 (branch on failed compare)
		 * retries until the swap succeeds.
		 */
		__asm__ __volatile__(
			"0:	cs %0,%2," MEMOP_REF(%3) "\n"
			"	brc 4,0b\n"
			: "=&r" (old_val),
			  MEMOP_OUT ((volatile unsigned int *)addr)
			: "r" (val), MEMOP_IN ((volatile unsigned int *)addr)
			: "memory", "cc");
		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			"0:	csg %0,%2," MEMOP_REF(%3) "\n"
			"	brc 4,0b\n"
			: "=&r" (old_val),
			  MEMOP_OUT ((volatile unsigned long *)addr)
			: "r" (val), MEMOP_IN ((volatile unsigned long *)addr)
			: "memory", "cc");
		return old_val;
	}
#endif
	default:
		/* Emit an invalid opcode: fail at run time on unsupported
		 * operand sizes. */
		__asm__ __volatile__(".long	0xd00d00");
	}

	return 0;
}

#define uatomic_xchg(addr, v)						     \
	(__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr)))

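/*
 * Usage sketch (illustrative; "busy" is a hypothetical variable): xchg
 * returns the previous value, which makes a simple test-and-set attempt
 * look like:
 *
 *	static unsigned long busy = 0;
 *
 *	if (uatomic_xchg(&busy, 1UL) == 0) {
 *		... critical section ...
 *		uatomic_set(&busy, 0UL);
 *	}
 */
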
118/* cmpxchg */
119
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val = (unsigned int)old;

		/*
		 * Single compare-and-swap attempt: on mismatch, cs loads
		 * the value found in memory into old_val, which is
		 * returned to the caller. The casts give the memory
		 * operands a complete type.
		 */
		__asm__ __volatile__(
			"	cs %0,%2," MEMOP_REF(%3) "\n"
			: "+r" (old_val),
			  MEMOP_OUT ((volatile unsigned int *)addr)
			: "r" (_new), MEMOP_IN ((volatile unsigned int *)addr)
			: "memory", "cc");
		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
			"	csg %0,%2," MEMOP_REF(%3) "\n"
			: "+r" (old),
			  MEMOP_OUT ((volatile unsigned long *)addr)
			: "r" (_new), MEMOP_IN ((volatile unsigned long *)addr)
			: "memory", "cc");
		return old;
	}
#endif
	default:
		/* Emit an invalid opcode: fail at run time on unsupported
		 * operand sizes. */
		__asm__ __volatile__(".long	0xd00d00");
	}

	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)				     \
	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),			     \
					       (unsigned long)(old),	     \
					       (unsigned long)(_new),	     \
					       sizeof(*(addr)))

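/*
 * Usage sketch (illustrative; "state" is a hypothetical variable):
 * cmpxchg returns the value found in *addr and stores _new only when
 * that value equals old, so success is detected by comparing the return
 * value against the expected old value:
 *
 *	unsigned long state = 0;
 *
 *	if (uatomic_cmpxchg(&state, 0UL, 1UL) == 0) {
 *		... state was 0 and has atomically become 1 ...
 *	}
 */
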
160/* uatomic_add_return */
161
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old, oldt;

		/*
		 * Classic compare-and-swap loop: reread and retry until
		 * old + val is installed over an unchanged old value.
		 */
		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
		} while (oldt != old);

		return old + val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
		} while (oldt != old);

		return old + val;
	}
#endif
	}
	/* Unsupported operand size. */
	__builtin_trap();
	return 0;
}

#define uatomic_add_return(addr, v)					     \
	((__typeof__(*(addr))) _uatomic_add_return((addr),		     \
						   (unsigned long)(v),	     \
						   sizeof(*(addr))))

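/*
 * Usage sketch (illustrative; free_object() is a hypothetical cleanup
 * helper): add_return yields the post-operation value, so a reference
 * count drop-and-test is a single call:
 *
 *	static unsigned long refcount = 1;
 *
 *	if (uatomic_add_return(&refcount, -1L) == 0)
 *		free_object();
 */
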
/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))

#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define uatomic_sub(addr, v)		(void)uatomic_sub_return((addr), (v))

#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define uatomic_dec(addr)		uatomic_add((addr), -1)

#define compat_uatomic_cmpxchg(ptr, old, _new)	uatomic_cmpxchg(ptr, old, _new)

#endif /* _URCU_UATOMIC_ARCH_S390_H */