#ifndef _URCU_UATOMIC_ARCH_S390_H
#define _URCU_UATOMIC_ARCH_S390_H

/*
 * Atomic exchange operations for the S390 architecture. Based on information
 * taken from the Principles of Operation Appendix A "Conditional Swapping
 * Instructions (CS, CDS)".
 *
 * Copyright (c) 2009 Novell, Inc.
 * Author: Jan Blunck <jblunck@suse.de>
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
31
#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

6ee8df54
MD
/*
 * GCC newer than 3.2 supports the short-displacement "Q" memory operand
 * constraint used below; older compilers fall back to passing the address
 * in a register ("a") with an explicit 0(%reg) reference in the assembly.
 */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define COMPILER_HAVE_SHORT_MEM_OPERAND
#endif

/*
 * MEMOP assembler operand rules:
 * - op refer to MEMOP_IN operand
 * - MEMOP_IN can expand to more than a single operand. Use it at the end of
 *   operand list only.
 */

#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND

/* Single memory operand (base register + short displacement). */
#define MEMOP_OUT(addr)	"=Q" (*(addr))
#define MEMOP_IN(addr)	"Q" (*(addr))
#define MEMOP_REF(op)	#op		/* op refer to MEMOP_IN operand */

#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */

/*
 * Fallback: the address goes in a register ("a" constraint); the extra "m"
 * operand tells the compiler the pointed-to memory is accessed. Because
 * MEMOP_IN expands to TWO operands here, it must stay last in the operand
 * list (see rule above).
 */
#define MEMOP_OUT(addr)	"=m" (*(addr))
#define MEMOP_IN(addr)	"a" (addr), "m" (*(addr))
#define MEMOP_REF(op)	"0(" #op ")"	/* op refer to MEMOP_IN operand */

#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
63
/*
 * The __hp() macro casts the void pointer @x to a pointer to a structure
 * containing an array of char of the specified size. This allows passing the
 * @addr arguments of the following inline functions as "m" and "+m" operands
 * to the assembly. The @size parameter should be a constant to support
 * compilers such as clang which do not support VLA.
 */

#define __hp(size, x)	((struct { char v[size]; } *)(x))
/* xchg */

/*
 * Atomically store @val into @addr and return the previous value.
 *
 * @addr: address of the 4- or 8-byte naturally-aligned operand.
 * @val:  new value to store.
 * @len:  operand size in bytes; 4, or 8 when CAA_BITS_PER_LONG == 64.
 *        Any other size executes an invalid opcode and traps at runtime.
 *
 * Implemented as a compare-and-swap retry loop (CS for 32-bit, CSG for
 * 64-bit): per the Principles of Operation, on a miscompare CS/CSG loads
 * the current memory value into the first operand (%0), so each retry
 * uses a fresh expected value until the swap succeeds.
 */
static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			/* compare-and-swap %0 with memory, store %2 on match */
			"0:	cs %0,%2," MEMOP_REF(%3) "\n"
			/* brc 4: branch back on miscompare (condition code 1) */
			"	brc 4,0b\n"
			: "=&r" (old_val), MEMOP_OUT (__hp(4, addr))
			: "r" (val), MEMOP_IN (__hp(4, addr))
			: "memory", "cc");
		return old_val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			"0:	csg %0,%2," MEMOP_REF(%3) "\n"
			"	brc 4,0b\n"
			: "=&r" (old_val), MEMOP_OUT (__hp(8, addr))
			: "r" (val), MEMOP_IN (__hp(8, addr))
			: "memory", "cc");
		return old_val;
	}
#endif
	default:
		/* Deliberate invalid opcode: trap on unsupported length. */
		__asm__ __volatile__(".long	0xd00d00");
	}

	return 0;
}
/*
 * Type-generic exchange: atomically replaces *addr with v and yields the
 * previous value, cast back to the type of *addr. caa_cast_long_keep_sign()
 * comes from urcu/compiler.h — presumably a sign-preserving widening cast
 * to unsigned long; confirm against that header.
 */
#define uatomic_xchg(addr, v)						    \
	(__typeof__(*(addr))) _uatomic_exchange((addr),			    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr)))
/* cmpxchg */

/*
 * If *addr equals @old, atomically store @_new into @addr. Returns the
 * value the compare observed in memory (equal to @old on success).
 *
 * @addr: address of the 4- or 8-byte naturally-aligned operand.
 * @old:  expected current value.
 * @_new: replacement value.
 * @len:  operand size in bytes; 4, or 8 when CAA_BITS_PER_LONG == 64.
 *        Any other size executes an invalid opcode and traps at runtime.
 *
 * Unlike _uatomic_exchange() there is no retry loop: a single CS/CSG is
 * exactly the compare-and-swap semantic, and on failure the observed
 * value is returned via the "+r" operand.
 */
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		/* CS compares 32-bit operands: truncate the expected value. */
		unsigned int old_val = (unsigned int)old;

		__asm__ __volatile__(
			"	cs %0,%2," MEMOP_REF(%3) "\n"
			: "+r" (old_val), MEMOP_OUT (__hp(4, addr))
			: "r" (_new), MEMOP_IN (__hp(4, addr))
			: "memory", "cc");
		return old_val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
			"	csg %0,%2," MEMOP_REF(%3) "\n"
			: "+r" (old), MEMOP_OUT (__hp(8, addr))
			: "r" (_new), MEMOP_IN (__hp(8, addr))
			: "memory", "cc");
		return old;
	}
#endif
	default:
		/* Deliberate invalid opcode: trap on unsupported length. */
		__asm__ __volatile__(".long 0xd00d00");
	}

	return 0;
}
/*
 * Type-generic compare-and-swap: returns the value observed in *addr
 * (equal to old on success), cast back to the type of *addr.
 */
#define uatomic_cmpxchg(addr, old, _new)				     \
	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),			     \
					       caa_cast_long_keep_sign(old), \
					       caa_cast_long_keep_sign(_new),\
					       sizeof(*(addr)))
#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_UATOMIC_ARCH_S390_H */