{
-	unsigned int result = old;
+	unsigned long result = old;
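+	/*
+	 * cmpxchgq compares %rax (tied to "result" via "+a", holding the
+	 * expected value) with the destination; on a match it stores the
+	 * new value, otherwise it loads the current value into %rax. The
+	 * lock prefix makes the whole compare-and-swap atomic. Note that
+	 * result must be unsigned long: cmpxchgq reads and writes all 64
+	 * bits of %rax.
+	 */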
__asm__ __volatile__(
- "lock; cmpxchgl %2, %1"
+ "lock; cmpxchgq %2, %1"
: "+a"(result), "+m"(*__hp(addr))
: "r"((unsigned long)_new)
: "memory");
((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
sizeof(*(addr))))
-/* atomic_add */
+/* atomic_add, atomic_sub */
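+/*
+ * Constraint notes for the asm below: "i" allows an immediate operand,
+ * "q" any byte-addressable register, "r" any general register, and "e"
+ * a sign-extended 32-bit immediate (the only immediate form "addq"
+ * accepts). The "memory" clobber stops the compiler from reordering
+ * other memory accesses across the atomic operation.
+ */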
static inline __attribute__((always_inline))
void _atomic_add(volatile void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
__asm__ __volatile__(
"lock; addb %1, %0"
: "=m"(*__hp(addr))
- : "q" ((unsigned char)val));
+ : "iq" ((unsigned char)val)
+ : "memory");
return;
}
case 2:
	{
__asm__ __volatile__(
"lock; addw %1, %0"
: "=m"(*__hp(addr))
- : "r" ((unsigned short)val));
+ : "ir" ((unsigned short)val)
+ : "memory");
return;
}
case 4:
	{
__asm__ __volatile__(
"lock; addl %1, %0"
: "=m"(*__hp(addr))
- : "r" ((unsigned int)val));
+ : "ir" ((unsigned int)val)
+ : "memory");
return;
}
#if (BITS_PER_LONG == 64)
	case 8:
	{
__asm__ __volatile__(
"lock; addq %1, %0"
: "=m"(*__hp(addr))
- : "r" ((unsigned long)val));
+ : "er" ((unsigned long)val)
+ : "memory");
return;
}
#endif
	}
	/* Unsupported size: generate an illegal instruction at run time.
	 * The usual link-time trick of referencing an undefined symbol
	 * cannot catch this case when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define atomic_add(addr, v) \
(_atomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
+#define atomic_sub(addr, v) atomic_add((addr), -(v))
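+
+/*
+ * Illustrative use (with a hypothetical counter, not part of this file):
+ *
+ *	static unsigned long hits;
+ *
+ *	atomic_add(&hits, 2);	 -> lock; addq $2, hits
+ *	atomic_sub(&hits, 2);	 -> lock; addq $-2, hits
+ *
+ * atomic_sub just negates v: the (unsigned long) conversion in
+ * _atomic_add wraps modulo 2^64, so two's complement addition
+ * performs the subtraction.
+ */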
+
+
+/* atomic_inc */
+
+static inline __attribute__((always_inline))
+void _atomic_inc(volatile void *addr, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; incb %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; incw %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; incl %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; incq %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#endif
+ }
+	/* Unsupported size: generate an illegal instruction at run time.
+	 * The usual link-time trick of referencing an undefined symbol
+	 * cannot catch this case when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define atomic_inc(addr) (_atomic_inc((addr), sizeof(*(addr))))
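+
+/*
+ * Note: unlike _atomic_add, the inc/dec asm takes no input operand;
+ * the memory location is both source and destination and the constant
+ * 1 is implicit in the instruction, which also gives a shorter
+ * encoding than "lock; add $1".
+ */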
+
+/* atomic_dec */
+
+static inline __attribute__((always_inline))
+void _atomic_dec(volatile void *addr, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; decb %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; decw %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; decl %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; decq %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#endif
+ }
+	/* Unsupported size: generate an illegal instruction at run time.
+	 * The usual link-time trick of referencing an undefined symbol
+	 * cannot catch this case when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define atomic_dec(addr) (_atomic_dec((addr), sizeof(*(addr))))
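+
+/*
+ * Illustrative use (with a hypothetical refcount, not part of this file):
+ *
+ *	static unsigned long refcount;
+ *
+ *	atomic_inc(&refcount);	 -> lock; incq refcount
+ *	atomic_dec(&refcount);	 -> lock; decq refcount
+ */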
+
#endif /* #ifndef _INCLUDE_API_H */
#endif /* ARCH_ATOMIC_X86_H */