/* atomic_add */
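/*
 * Note on the operand constraints changed below (standard GCC inline-asm
 * semantics): adding "i" ("iq", "ir") lets a constant increment be encoded
 * as an immediate instead of being forced through a register; "q" keeps the
 * byte case in a byte-addressable register (a-d on i386); the 64-bit case
 * uses "e" rather than "i" because addq only accepts sign-extended 32-bit
 * immediates. The added "memory" clobber additionally makes each
 * lock-prefixed add act as a compiler barrier.
 */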
static inline __attribute__((always_inline))
-unsigned long _atomic_add(volatile void *addr, unsigned long val, int len)
+void _atomic_add(volatile void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
			"lock; addb %1, %0"
			: "=m"(*__hp(addr))
-			: "q" ((unsigned char)val));
+			: "iq" ((unsigned char)val)
+			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
			"lock; addw %1, %0"
			: "=m"(*__hp(addr))
-			: "r" ((unsigned short)val));
+			: "ir" ((unsigned short)val)
+			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
			"lock; addl %1, %0"
			: "=m"(*__hp(addr))
-			: "r" ((unsigned int)val));
+			: "ir" ((unsigned int)val)
+			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
			"lock; addq %1, %0"
			: "=m"(*__hp(addr))
-			: "r" ((unsigned long)val));
+			: "er" ((unsigned long)val)
+			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
-	return 0;
+	return;
}
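/*
 * Usage sketch (illustrative, not part of the patch): the atomic_add()
 * wrapper below presumably passes sizeof(*(addr)) as len, so for a given
 * call site the always-inlined switch above collapses to a single
 * lock-prefixed instruction at compile time. With a hypothetical counter:
 *
 *	static unsigned long hits;
 *	atomic_add(&hits, 1);	// "lock; addq $1, hits" on x86-64
 */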
#define atomic_add(addr, v) \