Fix: x86 and s390 uatomic: __hp() macro warning with gcc 11
[urcu.git] / include / urcu / uatomic / s390.h
CommitLineData
9c697e4d
MD
1#ifndef _URCU_UATOMIC_ARCH_S390_H
2#define _URCU_UATOMIC_ARCH_S390_H
ac26f1a8
JB
3
4/*
7039fa6f
JB
5 * Atomic exchange operations for the S390 architecture. Based on information
6 * taken from the Principles of Operation Appendix A "Conditional Swapping
7 * Instructions (CS, CDS)".
ac26f1a8 8 *
7039fa6f 9 * Copyright (c) 2009 Novell, Inc.
ac26f1a8 10 * Author: Jan Blunck <jblunck@suse.de>
6982d6d7 11 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
ac26f1a8 12 *
7039fa6f
JB
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this software and associated documentation files (the "Software"), to
15 * deal in the Software without restriction, including without limitation the
16 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
17 * sell copies of the Software, and to permit persons to whom the Software is
18 * furnished to do so, subject to the following conditions:
ac26f1a8 19 *
7039fa6f
JB
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 * IN THE SOFTWARE.
ac26f1a8
JB
30 */
31
b46b23cb
MD
32#include <urcu/compiler.h>
33#include <urcu/system.h>
34
36bc70a8
MD
35#ifdef __cplusplus
36extern "C" {
67ecffc0 37#endif
36bc70a8 38
6ee8df54
MD
/*
 * gcc > 3.2 supports the short "Q" memory-operand constraint (base +
 * short displacement, no index register), which lets the assembler
 * reference the operand directly by name.
 */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define COMPILER_HAVE_SHORT_MEM_OPERAND
#endif

/*
 * MEMOP assembler operand rules:
 * - op refer to MEMOP_IN operand
 * - MEMOP_IN can expand to more than a single operand. Use it at the end of
 *   operand list only.
 */

#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND

#define MEMOP_OUT(addr)	"=Q" (*(addr))
#define MEMOP_IN(addr)	"Q" (*(addr))
#define MEMOP_REF(op)	#op		/* op refer to MEMOP_IN operand */

#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */

/*
 * Fallback: pass the address in a register ("a") and list the pointed-to
 * object as a plain "m" operand so gcc knows the memory is read/written.
 * MEMOP_REF then builds an explicit "0(reg)" base+displacement reference.
 */
#define MEMOP_OUT(addr)	"=m" (*(addr))
#define MEMOP_IN(addr)	"a" (addr), "m" (*(addr))
#define MEMOP_REF(op)	"0(" #op ")"	/* op refer to MEMOP_IN operand */

#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
63
/*
 * The __hp() macro casts the void pointer @x to a pointer to a structure
 * containing an array of char of the specified size. This allows passing the
 * @addr arguments of the following inline functions as "m" and "+m" operands
 * to the assembly.
 *
 * The @size argument must be a constant expression (it becomes the array
 * length of the anonymous struct type). Taking the size explicitly, rather
 * than deriving it from the pointed-to type, avoids the gcc 11
 * array-bounds/object-size warnings that motivated this form of the macro.
 */

#define __hp(size, x)	((struct { char v[size]; } *)(x))
53b8ed68 72
9c697e4d 73/* xchg */
f64acda4
MD
74
75static inline __attribute__((always_inline))
ec4e58a3 76unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
ac26f1a8
JB
77{
78 switch (len) {
79 case 4:
2837ec40 80 {
9c697e4d
MD
81 unsigned int old_val;
82
83 __asm__ __volatile__(
6ee8df54 84 "0: cs %0,%2," MEMOP_REF(%3) "\n"
9c697e4d 85 " brc 4,0b\n"
835b9ab3
MD
86 : "=&r" (old_val), MEMOP_OUT (__hp(len, addr))
87 : "r" (val), MEMOP_IN (__hp(len, addr))
9c697e4d 88 : "memory", "cc");
1cf421dc 89 return old_val;
2837ec40 90 }
b39e1761 91#if (CAA_BITS_PER_LONG == 64)
ac26f1a8 92 case 8:
2837ec40 93 {
9c697e4d
MD
94 unsigned long old_val;
95
96 __asm__ __volatile__(
6ee8df54 97 "0: csg %0,%2," MEMOP_REF(%3) "\n"
9c697e4d 98 " brc 4,0b\n"
835b9ab3
MD
99 : "=&r" (old_val), MEMOP_OUT (__hp(len, addr))
100 : "r" (val), MEMOP_IN (__hp(len, addr))
9c697e4d 101 : "memory", "cc");
1cf421dc 102 return old_val;
2837ec40 103 }
ac26f1a8
JB
104#endif
105 default:
106 __asm__ __volatile__(".long 0xd00d00");
107 }
108
109 return 0;
110}
111
/*
 * Type-generic exchange: atomically replace *(addr) with @v, returning
 * the old value cast back to the pointed-to type. The size is dispatched
 * at compile time via sizeof(*(addr)).
 */
#define uatomic_xchg(addr, v)						    \
	(__typeof__(*(addr))) _uatomic_exchange((addr),			    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr)))
ac26f1a8 116
9c697e4d 117/* cmpxchg */
8af57509
JB
118
119static inline __attribute__((always_inline))
9c697e4d 120unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
1cf421dc 121 unsigned long _new, int len)
8af57509
JB
122{
123 switch (len) {
124 case 4:
2837ec40 125 {
9c697e4d
MD
126 unsigned int old_val = (unsigned int)old;
127
128 __asm__ __volatile__(
6ee8df54 129 " cs %0,%2," MEMOP_REF(%3) "\n"
835b9ab3
MD
130 : "+r" (old_val), MEMOP_OUT (__hp(len, addr))
131 : "r" (_new), MEMOP_IN (__hp(len, addr))
9c697e4d
MD
132 : "memory", "cc");
133 return old_val;
2837ec40 134 }
b39e1761 135#if (CAA_BITS_PER_LONG == 64)
8af57509 136 case 8:
f64acda4 137 {
9c697e4d 138 __asm__ __volatile__(
6ee8df54 139 " csg %0,%2," MEMOP_REF(%3) "\n"
835b9ab3
MD
140 : "+r" (old), MEMOP_OUT (__hp(len, addr))
141 : "r" (_new), MEMOP_IN (__hp(len, addr))
9c697e4d
MD
142 : "memory", "cc");
143 return old;
f64acda4 144 }
8af57509
JB
145#endif
146 default:
147 __asm__ __volatile__(".long 0xd00d00");
148 }
149
9c697e4d 150 return 0;
8af57509
JB
151}
152
/*
 * Type-generic compare-and-swap: atomically replace *(addr) with @_new
 * if it currently equals @old. Returns the prior memory value cast back
 * to the pointed-to type; compare against @old to detect success.
 */
#define uatomic_cmpxchg(addr, old, _new)				     \
	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),			     \
					       caa_cast_long_keep_sign(old), \
					       caa_cast_long_keep_sign(_new),\
					       sizeof(*(addr)))
8af57509 158
67ecffc0 159#ifdef __cplusplus
36bc70a8
MD
160}
161#endif
162
a2e7bf9c 163#include <urcu/uatomic/generic.h>
8760d94e 164
9c697e4d 165#endif /* _URCU_UATOMIC_ARCH_S390_H */
This page took 0.052007 seconds and 4 git commands to generate.