From 713234991cce999c376345400f9367f1e2317076 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Tue, 15 Jun 2021 16:00:28 -0400
Subject: [PATCH] Fix: x86 and s390: uatomic __hp() macro clang support

The __hp macro should receive constant size arguments to support clang,
which does not implement VLA support.

Signed-off-by: Mathieu Desnoyers
Change-Id: Ifa3d5b5b7921f54849e0f331bef8f07c059b998f
---
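Note (kept below the "---" separator, so git am drops it along with the
rest of the patch notes): with a non-constant size argument, the
anonymous struct member v[size] becomes a variable-length array. VLA
members are a GCC extension that clang rejects, with a diagnostic along
the lines of "fields must have a constant size". A minimal standalone
sketch of the failure mode and the fix, assuming an x86 target; the
names hp_cast and load_byte are illustrative, not from the tree:

	/* Hypothetical reduction of the __hp() pattern. */
	#define hp_cast(size, x)	((struct { char v[size]; } *)(x))

	static unsigned char load_byte(void *addr, int len)
	{
		unsigned char c;

		/*
		 * Non-constant size: "len" would make v[] a variable-length
		 * array member. gcc accepts that as an extension; clang
		 * rejects it, so this line is left commented out:
		 *
		 * __asm__("movb %1, %0" : "=q"(c) : "m"(*hp_cast(len, addr)));
		 */

		/*
		 * Constant size: a fixed-size anonymous struct, accepted by
		 * both gcc and clang. The constant matches the 1-byte
		 * operand width of movb.
		 */
		__asm__("movb %1, %0" : "=q"(c) : "m"(*hp_cast(1, addr)));
		(void)len;
		return c;
	}

This is why each call site below passes the literal width (1, 2, 4 or 8)
that its per-size case already guarantees, instead of forwarding the
runtime len parameter into the cast.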
 include/urcu/uatomic/s390.h | 21 +++++------
 include/urcu/uatomic/x86.h  | 69 +++++++++++++++++++------------------
 2 files changed, 46 insertions(+), 44 deletions(-)

diff --git a/include/urcu/uatomic/s390.h b/include/urcu/uatomic/s390.h
index 47b97b1..d7d545a 100644
--- a/include/urcu/uatomic/s390.h
+++ b/include/urcu/uatomic/s390.h
@@ -62,10 +62,11 @@ extern "C" {
 #endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
 
 /*
- * The __hp() macro casts the void pointer "x" to a pointer to a structure
+ * The __hp() macro casts the void pointer @x to a pointer to a structure
  * containing an array of char of the specified size. This allows passing the
  * @addr arguments of the following inline functions as "m" and "+m" operands
- * to the assembly.
+ * to the assembly. The @size parameter should be a constant to support
+ * compilers such as clang which do not support VLA.
  */
 #define __hp(size, x)	((struct { char v[size]; } *)(x))
 
@@ -83,8 +84,8 @@ unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
 		__asm__ __volatile__(
 			"0:	cs %0,%2," MEMOP_REF(%3) "\n"
 			"	brc 4,0b\n"
-			: "=&r" (old_val), MEMOP_OUT (__hp(len, addr))
-			: "r" (val), MEMOP_IN (__hp(len, addr))
+			: "=&r" (old_val), MEMOP_OUT (__hp(4, addr))
+			: "r" (val), MEMOP_IN (__hp(4, addr))
 			: "memory", "cc");
 		return old_val;
 	}
@@ -96,8 +97,8 @@ unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
 		__asm__ __volatile__(
 			"0:	csg %0,%2," MEMOP_REF(%3) "\n"
 			"	brc 4,0b\n"
-			: "=&r" (old_val), MEMOP_OUT (__hp(len, addr))
-			: "r" (val), MEMOP_IN (__hp(len, addr))
+			: "=&r" (old_val), MEMOP_OUT (__hp(8, addr))
+			: "r" (val), MEMOP_IN (__hp(8, addr))
 			: "memory", "cc");
 		return old_val;
 	}
@@ -127,8 +128,8 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 
 		__asm__ __volatile__(
 			"	cs %0,%2," MEMOP_REF(%3) "\n"
-			: "+r" (old_val), MEMOP_OUT (__hp(len, addr))
-			: "r" (_new), MEMOP_IN (__hp(len, addr))
+			: "+r" (old_val), MEMOP_OUT (__hp(4, addr))
+			: "r" (_new), MEMOP_IN (__hp(4, addr))
 			: "memory", "cc");
 		return old_val;
 	}
@@ -137,8 +138,8 @@
 	{
 		__asm__ __volatile__(
 			"	csg %0,%2," MEMOP_REF(%3) "\n"
-			: "+r" (old), MEMOP_OUT (__hp(len, addr))
-			: "r" (_new), MEMOP_IN (__hp(len, addr))
+			: "+r" (old), MEMOP_OUT (__hp(8, addr))
+			: "r" (_new), MEMOP_IN (__hp(8, addr))
 			: "memory", "cc");
 		return old;
 	}
diff --git a/include/urcu/uatomic/x86.h b/include/urcu/uatomic/x86.h
index e9f2f78..c02c96d 100644
--- a/include/urcu/uatomic/x86.h
+++ b/include/urcu/uatomic/x86.h
@@ -37,10 +37,11 @@ extern "C" {
  */
 
 /*
- * The __hp() macro casts the void pointer "x" to a pointer to a structure
+ * The __hp() macro casts the void pointer @x to a pointer to a structure
  * containing an array of char of the specified size. This allows passing the
  * @addr arguments of the following inline functions as "m" and "+m" operands
- * to the assembly.
+ * to the assembly. The @size parameter should be a constant to support
+ * compilers such as clang which do not support VLA.
  */
 #define __hp(size, x)	((struct { char v[size]; } *)(x))
 
@@ -60,7 +61,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 
 		__asm__ __volatile__(
 		"lock; cmpxchgb %2, %1"
-			: "+a"(result), "+m"(*__hp(len, addr))
+			: "+a"(result), "+m"(*__hp(1, addr))
 			: "q"((unsigned char)_new)
 			: "memory");
 		return result;
@@ -71,7 +72,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 
 		__asm__ __volatile__(
 		"lock; cmpxchgw %2, %1"
-			: "+a"(result), "+m"(*__hp(len, addr))
+			: "+a"(result), "+m"(*__hp(2, addr))
 			: "r"((unsigned short)_new)
 			: "memory");
 		return result;
@@ -82,7 +83,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 
 		__asm__ __volatile__(
 		"lock; cmpxchgl %2, %1"
-			: "+a"(result), "+m"(*__hp(len, addr))
+			: "+a"(result), "+m"(*__hp(4, addr))
 			: "r"((unsigned int)_new)
 			: "memory");
 		return result;
@@ -94,7 +95,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 
 		__asm__ __volatile__(
 		"lock; cmpxchgq %2, %1"
-			: "+a"(result), "+m"(*__hp(len, addr))
+			: "+a"(result), "+m"(*__hp(8, addr))
 			: "r"((unsigned long)_new)
 			: "memory");
 		return result;
@@ -127,7 +128,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
 		unsigned char result;
 		__asm__ __volatile__(
 		"xchgb %0, %1"
-			: "=q"(result), "+m"(*__hp(len, addr))
+			: "=q"(result), "+m"(*__hp(1, addr))
 			: "0" ((unsigned char)val)
 			: "memory");
 		return result;
@@ -137,7 +138,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
 		unsigned short result;
 		__asm__ __volatile__(
 		"xchgw %0, %1"
-			: "=r"(result), "+m"(*__hp(len, addr))
+			: "=r"(result), "+m"(*__hp(2, addr))
 			: "0" ((unsigned short)val)
 			: "memory");
 		return result;
@@ -147,7 +148,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
 		unsigned int result;
 		__asm__ __volatile__(
 		"xchgl %0, %1"
-			: "=r"(result), "+m"(*__hp(len, addr))
+			: "=r"(result), "+m"(*__hp(4, addr))
 			: "0" ((unsigned int)val)
 			: "memory");
 		return result;
@@ -158,7 +159,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
 		unsigned long result;
 		__asm__ __volatile__(
 		"xchgq %0, %1"
-			: "=r"(result), "+m"(*__hp(len, addr))
+			: "=r"(result), "+m"(*__hp(8, addr))
 			: "0" ((unsigned long)val)
 			: "memory");
 		return result;
@@ -191,7 +192,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 
 		__asm__ __volatile__(
 		"lock; xaddb %1, %0"
-			: "+m"(*__hp(len, addr)), "+q" (result)
+			: "+m"(*__hp(1, addr)), "+q" (result)
 			:
 			: "memory");
 		return result + (unsigned char)val;
@@ -202,7 +203,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 
 		__asm__ __volatile__(
 		"lock; xaddw %1, %0"
-			: "+m"(*__hp(len, addr)), "+r" (result)
+			: "+m"(*__hp(2, addr)), "+r" (result)
 			:
 			: "memory");
 		return result + (unsigned short)val;
@@ -213,7 +214,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 
 		__asm__ __volatile__(
 		"lock; xaddl %1, %0"
-			: "+m"(*__hp(len, addr)), "+r" (result)
+			: "+m"(*__hp(4, addr)), "+r" (result)
 			:
 			: "memory");
 		return result + (unsigned int)val;
@@ -225,7 +226,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 
 		__asm__ __volatile__(
 		"lock; xaddq %1, %0"
-			: "+m"(*__hp(len, addr)), "+r" (result)
+			: "+m"(*__hp(8, addr)), "+r" (result)
 			:
 			: "memory");
 		return result + (unsigned long)val;
@@ -255,7 +256,7 @@ void __uatomic_and(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; andb %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(1, addr))
 			: "iq" ((unsigned char)val)
 			: "memory");
 		return;
@@ -264,7 +265,7 @@ void __uatomic_and(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; andw %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(2, addr))
 			: "ir" ((unsigned short)val)
 			: "memory");
 		return;
@@ -273,7 +274,7 @@ void __uatomic_and(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; andl %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(4, addr))
 			: "ir" ((unsigned int)val)
 			: "memory");
 		return;
@@ -283,7 +284,7 @@ void __uatomic_and(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; andq %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(8, addr))
 			: "er" ((unsigned long)val)
 			: "memory");
 		return;
@@ -311,7 +312,7 @@ void __uatomic_or(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; orb %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(1, addr))
 			: "iq" ((unsigned char)val)
 			: "memory");
 		return;
@@ -320,7 +321,7 @@ void __uatomic_or(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; orw %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(2, addr))
 			: "ir" ((unsigned short)val)
 			: "memory");
 		return;
@@ -329,7 +330,7 @@ void __uatomic_or(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; orl %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(4, addr))
 			: "ir" ((unsigned int)val)
 			: "memory");
 		return;
@@ -339,7 +340,7 @@ void __uatomic_or(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; orq %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(8, addr))
 			: "er" ((unsigned long)val)
 			: "memory");
 		return;
@@ -367,7 +368,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; addb %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(1, addr))
 			: "iq" ((unsigned char)val)
 			: "memory");
 		return;
@@ -376,7 +377,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; addw %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(2, addr))
 			: "ir" ((unsigned short)val)
 			: "memory");
 		return;
@@ -385,7 +386,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; addl %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(4, addr))
 			: "ir" ((unsigned int)val)
 			: "memory");
 		return;
@@ -395,7 +396,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
 	{
 		__asm__ __volatile__(
 		"lock; addq %1, %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(8, addr))
 			: "er" ((unsigned long)val)
 			: "memory");
 		return;
@@ -424,7 +425,7 @@ void __uatomic_inc(void *addr, int len)
 	{
 		__asm__ __volatile__(
 		"lock; incb %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(1, addr))
 			:
 			: "memory");
 		return;
@@ -433,7 +434,7 @@ void __uatomic_inc(void *addr, int len)
 	{
 		__asm__ __volatile__(
 		"lock; incw %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(2, addr))
 			:
 			: "memory");
 		return;
@@ -442,7 +443,7 @@ void __uatomic_inc(void *addr, int len)
 	{
 		__asm__ __volatile__(
 		"lock; incl %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(4, addr))
 			:
 			: "memory");
 		return;
@@ -452,7 +453,7 @@ void __uatomic_inc(void *addr, int len)
 	{
 		__asm__ __volatile__(
 		"lock; incq %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(8, addr))
 			:
 			: "memory");
 		return;
@@ -477,7 +478,7 @@ void __uatomic_dec(void *addr, int len)
 	{
 		__asm__ __volatile__(
 		"lock; decb %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(1, addr))
 			:
 			: "memory");
 		return;
@@ -486,7 +487,7 @@ void __uatomic_dec(void *addr, int len)
 	{
 		__asm__ __volatile__(
 		"lock; decw %0"
-			: "=m"(*__hp(len, addr))
+			: "=m"(*__hp(2, addr))
 			:
 			: "memory");
 		return;
__volatile__( "lock; decl %0" - : "=m"(*__hp(len, addr)) + : "=m"(*__hp(4, addr)) : : "memory"); return; @@ -505,7 +506,7 @@ void __uatomic_dec(void *addr, int len) { __asm__ __volatile__( "lock; decq %0" - : "=m"(*__hp(len, addr)) + : "=m"(*__hp(8, addr)) : : "memory"); return; -- 2.34.1