Update atomic x86_64 cmpxchg
author Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Tue, 22 Sep 2009 21:23:08 +0000 (17:23 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Tue, 22 Sep 2009 21:23:08 +0000 (17:23 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
arch_atomic_x86.h

index fdd3d6b58ecd28eaf620e10001920eae0193a073..97a7cdd019129b8f2c7cf190991e89d5fef52a2a 100644 (file)
@@ -77,7 +77,7 @@ unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
        {
                unsigned int result = old;
                __asm__ __volatile__(
-               "lock; cmpxchgl %2, %1"
+               "lock; cmpxchgq %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
                        : "r"((unsigned long)_new)
                        : "memory");
@@ -156,7 +156,7 @@ unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
        ((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
                                                sizeof(*(addr))))
 
-/* atomic_add */
+/* atomic_add, atomic_sub */
 
 static inline __attribute__((always_inline))
 void _atomic_add(volatile void *addr, unsigned long val, int len)
@@ -210,6 +210,115 @@ void _atomic_add(volatile void *addr, unsigned long val, int len)
 #define atomic_add(addr, v)                                               \
        (_atomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
 
+#define atomic_sub(addr, v)    atomic_add((addr), -(v))
+
+
+/* atomic_inc */
+
+static inline __attribute__((always_inline))
+void _atomic_inc(volatile void *addr, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; incb %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; incw %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; incl %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; incq %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define atomic_inc(addr)       (_atomic_inc((addr), sizeof(*(addr))))
+
+/* atomic_dec */
+
+static inline __attribute__((always_inline))
+void _atomic_dec(volatile void *addr, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; decb %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; decw %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; decl %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; decq %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define atomic_dec(addr)       (_atomic_dec((addr), sizeof(*(addr))))
+
 #endif /* #ifndef _INCLUDE_API_H */
 
 #endif /* ARCH_ATOMIC_X86_H */
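
For reference, a minimal usage sketch of the macros added above (the include path and counter name are illustrative, assuming a 64-bit x86 build where BITS_PER_LONG == 64). The width of *(addr) selects the operand size, so an 8-byte counter gets the q-suffixed instructions:

#include "arch_atomic_x86.h"	/* assumed include path */

static unsigned long hits;	/* hypothetical 8-byte counter */

void record_hit(void)
{
	atomic_inc(&hits);	/* lock; incq, since sizeof(hits) == 8 */
}

void drop_hit(void)
{
	atomic_dec(&hits);	/* lock; decq */
}

void adjust_hits(long delta)
{
	atomic_add(&hits, delta);	/* locked 64-bit add */
	atomic_sub(&hits, delta);	/* expands to atomic_add(&hits, -(delta)) */
}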