uatomic: add uatomic_and
author		Paolo Bonzini <pbonzini@redhat.com>
		Thu, 9 Jun 2011 13:30:40 +0000 (09:30 -0400)
committer	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
		Thu, 9 Jun 2011 13:30:40 +0000 (09:30 -0400)
Only x86 has an arch-specific implementation for now; the rest uses either
cmpxchg or the gcc __sync_ builtins.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
compat_arch_x86.c
tests/test_uatomic.c
urcu/uatomic_arch_x86.h
urcu/uatomic_generic.h
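For orientation, a minimal caller of the new primitive might look like the
sketch below. The include path, the flag variable, and the mask value are
illustrative assumptions, not part of this patch:

#include <urcu/uatomic.h>	/* assumed umbrella header; the exact path may differ between liburcu versions */

static unsigned long flags = 0xffUL;	/* hypothetical shared word */

static void clear_low_bit(void)
{
	/*
	 * Atomically clear bit 0 of flags. Concurrent updates to the other
	 * bits made through the uatomic_* primitives are preserved.
	 */
	uatomic_and(&flags, ~0x01UL);
}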

index 33bf13df41b7bcae348e3d4e6dcb3661d97b64d5..692417ee5067607293ef3c9129f34f8a8b7bb739 100644 (file)
--- a/compat_arch_x86.c
+++ b/compat_arch_x86.c
@@ -226,6 +226,31 @@ void _compat_uatomic_or(void *addr, unsigned long v, int len)
        mutex_lock_signal_restore(&compat_mutex, &mask);
 }
 
+void _compat_uatomic_and(void *addr, unsigned long v, int len)
+{
+       sigset_t mask;
+
+       mutex_lock_signal_save(&compat_mutex, &mask);
+       switch (len) {
+       case 1:
+               *(unsigned char *)addr &= (unsigned char)v;
+               break;
+       case 2:
+               *(unsigned short *)addr &= (unsigned short)v;
+               break;
+       case 4:
+               *(unsigned int *)addr &= (unsigned int)v;
+               break;
+       default:
+               /*
+                * generate an illegal instruction. Cannot catch this with
+                * linker tricks when optimizations are disabled.
+                */
+               __asm__ __volatile__("ud2");
+       }
+       mutex_lock_signal_restore(&compat_mutex, &mask);
+}
+
 unsigned long _compat_uatomic_add_return(void *addr, unsigned long v, int len)
 {
        sigset_t mask;
index 37f95a6d849699f7756676a09f0fee78803f4054..2c8c2324c3d658aa4ea1f0a3f3caf9c8c84dbf0d 100644 (file)
--- a/tests/test_uatomic.c
+++ b/tests/test_uatomic.c
@@ -41,6 +41,8 @@ do {                                          \
        v = uatomic_sub_return(ptr, 1);         \
        assert(v == 121);                       \
        assert(uatomic_read(ptr) == 121);       \
+       uatomic_and(ptr, 129);                  \
+       assert(uatomic_read(ptr) == 1);         \
 } while (0)
 
 int main(int argc, char **argv)
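(Sanity check for the new assertion: at this point the counter holds 121 = 0b01111001, and AND-ing it with 129 = 0b10000001 leaves only bit 0 set, hence the expected value 1.)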
index f57d46dd1f8cd876e8969ddb158e6267acd110c7..9fedee65fac00d72a95545b2bcd38c909a786d30 100644 (file)
--- a/urcu/uatomic_arch_x86.h
+++ b/urcu/uatomic_arch_x86.h
@@ -231,6 +231,60 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
                                                  (unsigned long)(v),   \
                                                  sizeof(*(addr))))
 
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void __uatomic_and(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; andb %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "iq" ((unsigned char)val)
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; andw %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned short)val)
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; andl %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned int)val)
+                       : "memory");
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; andq %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "er" ((unsigned long)val)
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define _uatomic_and(addr, v)                                             \
+       (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
+
 /* uatomic_or */
 
 static inline __attribute__((always_inline))
@@ -482,6 +536,13 @@ extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
                                                (unsigned long)(_new),         \
                                                sizeof(*(addr))))
 
+extern unsigned long _compat_uatomic_and(void *addr,
+                                        unsigned long _new, int len);
+#define compat_uatomic_and(addr, v)                                   \
+       ((__typeof__(*(addr))) _compat_uatomic_and((addr),             \
+                                                  (unsigned long)(v), \
+                                                  sizeof(*(addr))))
+
 extern unsigned long _compat_uatomic_or(void *addr,
                                        unsigned long _new, int len);
 #define compat_uatomic_or(addr, v)                                    \
@@ -515,6 +576,8 @@ extern unsigned long _compat_uatomic_add_return(void *addr,
                UATOMIC_COMPAT(cmpxchg(addr, old, _new))
 #define uatomic_xchg(addr, v)                  \
                UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_and(addr, v)           \
+               UATOMIC_COMPAT(and(addr, v))
 #define uatomic_or(addr, v)            \
                UATOMIC_COMPAT(or(addr, v))
 #define uatomic_add_return(addr, v)            \
index 556846ff7260dc34c0753c1b3c310f8362f9bb1f..cef58f382180ba1061b1bb2e37c7cc87c0b7f354 100644 (file)
--- a/urcu/uatomic_generic.h
+++ b/urcu/uatomic_generic.h
@@ -87,6 +87,39 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                                                sizeof(*(addr))))
 
 
+/* uatomic_and */
+
+#ifndef uatomic_and
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val,
+                 int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+               __sync_and_and_fetch_1(addr, val);
+               return;
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+               __sync_and_and_fetch_2(addr, val);
+               return;
+#endif
+       case 4:
+               __sync_and_and_fetch_4(addr, val);
+               return;
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+               __sync_and_and_fetch_8(addr, val);
+               return;
+#endif
+       }
+       _uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)                   \
+       (_uatomic_and((addr),                   \
+                     (unsigned long)(v),       \
+                     sizeof(*(addr))))
+#endif
+
 /* uatomic_or */
 
 #ifndef uatomic_or
@@ -219,6 +252,70 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 
 #else /* #ifndef uatomic_cmpxchg */
 
+#ifndef uatomic_and
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+       {
+               unsigned char old, oldt;
+
+               oldt = uatomic_read((unsigned char *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
+               } while (oldt != old);
+               return;
+       }
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+       {
+               unsigned short old, oldt;
+
+               oldt = uatomic_read((unsigned short *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
+               } while (oldt != old);
+               return;
+       }
+#endif
+       case 4:
+       {
+               unsigned int old, oldt;
+
+               oldt = uatomic_read((unsigned int *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
+               } while (oldt != old);
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old, oldt;
+
+               oldt = uatomic_read((unsigned long *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
+               } while (oldt != old);
+               return;
+       }
+#endif
+       }
+       _uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)           \
+       (_uatomic_and((addr),           \
+                   (unsigned long)(v), \
+                   sizeof(*(addr))))
+#endif /* #ifndef uatomic_and */
+
 #ifndef uatomic_or
 /* uatomic_or */
 
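When neither an arch-specific helper nor a __sync builtin of the right width is available, the generic code above falls back to a cmpxchg retry loop. The following stand-alone sketch (hypothetical names, a plain gcc builtin standing in for _uatomic_cmpxchg) shows the same pattern in isolation:

#include <stdio.h>

/*
 * Retry loop equivalent to the generic _uatomic_and fallback: re-read the
 * current value, try to install (old & mask), and loop if another thread
 * changed the word between the read and the compare-and-swap.
 */
static void and_with_cas(unsigned int *addr, unsigned int mask)
{
	unsigned int old, oldt;

	oldt = *(volatile unsigned int *)addr;
	do {
		old = oldt;
		oldt = __sync_val_compare_and_swap(addr, old, old & mask);
	} while (oldt != old);
}

int main(void)
{
	unsigned int word = 121;

	and_with_cas(&word, 129);
	printf("%u\n", word);	/* prints 1, matching the new test assertion */
	return 0;
}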