#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
/*
- * The __hp() macro casts the void pointer "x" to a pointer to a structure
+ * The __hp() macro casts the void pointer @x to a pointer to a structure
* containing an array of char of the specified size. This allows passing the
* @addr arguments of the following inline functions as "m" and "+m" operands
- * to the assembly.
+ * to the assembly. The @size parameter should be a constant to support
+ * compilers such as clang, which do not support VLAs.
*/
#define __hp(size, x) ((struct { char v[size]; } *)(x))
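
/*
 * Illustrative standalone sketch (not part of the patch): what a call site
 * such as __hp(4, addr) gives the compiler.  The main()/printf scaffolding
 * and the variable names are made up for this example; it assumes a C
 * compiler (gcc or clang) and reuses the __hp() macro defined above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int word = 42;
	void *addr = &word;

	/*
	 * With a constant size the anonymous struct has a fixed layout:
	 * sizeof(*__hp(4, addr)) == 4, so *__hp(4, addr) can stand in for
	 * an "m"/"+m" asm operand covering exactly 4 bytes at addr.
	 */
	printf("operand width: %zu bytes\n", sizeof(*__hp(4, addr)));

	/*
	 * Passing a runtime value (such as the old "len" parameter) would
	 * turn v[] into a VLA inside the cast, which clang rejects; hence
	 * the literal 4 and 8 at the call sites below.
	 */
	return 0;
}
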
__asm__ __volatile__(
"0: cs %0,%2," MEMOP_REF(%3) "\n"
" brc 4,0b\n"
- : "=&r" (old_val), MEMOP_OUT (__hp(len, addr))
- : "r" (val), MEMOP_IN (__hp(len, addr))
+ : "=&r" (old_val), MEMOP_OUT (__hp(4, addr))
+ : "r" (val), MEMOP_IN (__hp(4, addr))
: "memory", "cc");
return old_val;
}
__asm__ __volatile__(
"0: csg %0,%2," MEMOP_REF(%3) "\n"
" brc 4,0b\n"
- : "=&r" (old_val), MEMOP_OUT (__hp(len, addr))
- : "r" (val), MEMOP_IN (__hp(len, addr))
+ : "=&r" (old_val), MEMOP_OUT (__hp(8, addr))
+ : "r" (val), MEMOP_IN (__hp(8, addr))
: "memory", "cc");
return old_val;
}
__asm__ __volatile__(
" cs %0,%2," MEMOP_REF(%3) "\n"
- : "+r" (old_val), MEMOP_OUT (__hp(len, addr))
- : "r" (_new), MEMOP_IN (__hp(len, addr))
+ : "+r" (old_val), MEMOP_OUT (__hp(4, addr))
+ : "r" (_new), MEMOP_IN (__hp(4, addr))
: "memory", "cc");
return old_val;
}
{
__asm__ __volatile__(
" csg %0,%2," MEMOP_REF(%3) "\n"
- : "+r" (old), MEMOP_OUT (__hp(len, addr))
- : "r" (_new), MEMOP_IN (__hp(len, addr))
+ : "+r" (old), MEMOP_OUT (__hp(8, addr))
+ : "r" (_new), MEMOP_IN (__hp(8, addr))
: "memory", "cc");
return old;
}
*/
/*
- * The __hp() macro casts the void pointer "x" to a pointer to a structure
+ * The __hp() macro casts the void pointer @x to a pointer to a structure
* containing an array of char of the specified size. This allows passing the
* @addr arguments of the following inline functions as "m" and "+m" operands
- * to the assembly.
+ * to the assembly. The @size parameter should be a constant to support
+ * compilers such as clang, which do not support VLAs.
*/
#define __hp(size, x) ((struct { char v[size]; } *)(x))
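
/*
 * Illustrative standalone sketch (not part of the patch): how the fixed
 * __hp() size is consumed as a "+m" operand on x86-64, mirroring the
 * 4-byte xchg hunk below.  demo_xchg_u32() and main() are made up for
 * this example; it assumes x86-64 with GCC-style inline asm and reuses
 * the __hp() macro defined above.
 */
#include <stdio.h>

static inline unsigned int demo_xchg_u32(void *addr, unsigned int val)
{
	unsigned int result;

	__asm__ __volatile__(
		/* xchg with a memory operand is implicitly locked */
		"xchgl %0, %1"
		: "=r"(result), "+m"(*__hp(4, addr))
		: "0" (val)
		: "memory");
	return result;
}

int main(void)
{
	unsigned int x = 1;
	unsigned int old = demo_xchg_u32(&x, 7);

	printf("old=%u new=%u\n", old, x);	/* prints: old=1 new=7 */
	return 0;
}
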
__asm__ __volatile__(
"lock; cmpxchgb %2, %1"
- : "+a"(result), "+m"(*__hp(len, addr))
+ : "+a"(result), "+m"(*__hp(1, addr))
: "q"((unsigned char)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgw %2, %1"
- : "+a"(result), "+m"(*__hp(len, addr))
+ : "+a"(result), "+m"(*__hp(2, addr))
: "r"((unsigned short)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgl %2, %1"
- : "+a"(result), "+m"(*__hp(len, addr))
+ : "+a"(result), "+m"(*__hp(4, addr))
: "r"((unsigned int)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgq %2, %1"
- : "+a"(result), "+m"(*__hp(len, addr))
+ : "+a"(result), "+m"(*__hp(8, addr))
: "r"((unsigned long)_new)
: "memory");
return result;
unsigned char result;
__asm__ __volatile__(
"xchgb %0, %1"
- : "=q"(result), "+m"(*__hp(len, addr))
+ : "=q"(result), "+m"(*__hp(1, addr))
: "0" ((unsigned char)val)
: "memory");
return result;
unsigned short result;
__asm__ __volatile__(
"xchgw %0, %1"
- : "=r"(result), "+m"(*__hp(len, addr))
+ : "=r"(result), "+m"(*__hp(2, addr))
: "0" ((unsigned short)val)
: "memory");
return result;
unsigned int result;
__asm__ __volatile__(
"xchgl %0, %1"
- : "=r"(result), "+m"(*__hp(len, addr))
+ : "=r"(result), "+m"(*__hp(4, addr))
: "0" ((unsigned int)val)
: "memory");
return result;
unsigned long result;
__asm__ __volatile__(
"xchgq %0, %1"
- : "=r"(result), "+m"(*__hp(len, addr))
+ : "=r"(result), "+m"(*__hp(8, addr))
: "0" ((unsigned long)val)
: "memory");
return result;
__asm__ __volatile__(
"lock; xaddb %1, %0"
- : "+m"(*__hp(len, addr)), "+q" (result)
+ : "+m"(*__hp(1, addr)), "+q" (result)
:
: "memory");
return result + (unsigned char)val;
__asm__ __volatile__(
"lock; xaddw %1, %0"
- : "+m"(*__hp(len, addr)), "+r" (result)
+ : "+m"(*__hp(2, addr)), "+r" (result)
:
: "memory");
return result + (unsigned short)val;
__asm__ __volatile__(
"lock; xaddl %1, %0"
- : "+m"(*__hp(len, addr)), "+r" (result)
+ : "+m"(*__hp(4, addr)), "+r" (result)
:
: "memory");
return result + (unsigned int)val;
__asm__ __volatile__(
"lock; xaddq %1, %0"
- : "+m"(*__hp(len, addr)), "+r" (result)
+ : "+m"(*__hp(8, addr)), "+r" (result)
:
: "memory");
return result + (unsigned long)val;
{
__asm__ __volatile__(
"lock; andb %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(1, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andw %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(2, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andl %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(4, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andq %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(8, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orb %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(1, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orw %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(2, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orl %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(4, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orq %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(8, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addb %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(1, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addw %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(2, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addl %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(4, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addq %1, %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(8, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; incb %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(1, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incw %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(2, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incl %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(4, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incq %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(8, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decb %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(1, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decw %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(2, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decl %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(4, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decq %0"
- : "=m"(*__hp(len, addr))
+ : "=m"(*__hp(8, addr))
:
: "memory");
return;