Headers: move arch_*.h to urcu/arch/*.h
diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h
index 666e498f326c4c3b8ce025352b3f385416dbc32b..9fedee65fac00d72a95545b2bcd38c909a786d30 100644
--- a/urcu/uatomic_arch_x86.h
+++ b/urcu/uatomic_arch_x86.h
 extern "C" {
 #endif 
 
-#ifndef __SIZEOF_LONG__
-#if defined(__x86_64__) || defined(__amd64__)
-#define __SIZEOF_LONG__ 8
-#else
-#define __SIZEOF_LONG__ 4
-#endif
-#endif
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG  (__SIZEOF_LONG__ * 8)
-#endif
-
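
The fallback block removed above is not lost: the word-size logic moves
into liburcu's shared compiler header under the new CAA_ prefix, which is
why every BITS_PER_LONG test below becomes CAA_BITS_PER_LONG. A hedged
sketch of the centralized definition, reconstructed from the removed
lines (its exact home is urcu/compiler.h in later trees):

	#ifndef __SIZEOF_LONG__
	#if defined(__x86_64__) || defined(__amd64__)
	#define __SIZEOF_LONG__	8
	#else
	#define __SIZEOF_LONG__	4
	#endif
	#endif

	#define CAA_BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
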
 /*
  * Derived from AO_compare_and_swap() and AO_test_and_set_full().
  */
@@ -51,7 +39,7 @@ struct __uatomic_dummy {
 };
 #define __hp(x)        ((struct __uatomic_dummy *)(x))
 
-#define _uatomic_set(addr, v)  STORE_SHARED(*(addr), (v))
+#define _uatomic_set(addr, v)  CMM_STORE_SHARED(*(addr), (v))
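
The rename of STORE_SHARED() to CMM_STORE_SHARED() is the same
namespacing cleanup: the cmm_ prefix marks the concurrent-memory-model
helpers. In essence the macro is a volatile store, so the compiler can
neither tear nor reorder the write. A hedged approximation, leaving out
the cache-coherency hook that the real urcu/system.h definition also
invokes:

	#define CMM_ACCESS_ONCE(x)	(*(__volatile__ __typeof__(x) *)&(x))
	#define CMM_STORE_SHARED(x, v)	(CMM_ACCESS_ONCE(x) = (v))
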
 
 /* cmpxchg */
 
@@ -93,7 +81,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
                        : "memory");
                return result;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result = old;
@@ -155,7 +143,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
                        : "memory");
                return result;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;
@@ -218,7 +206,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
                        : "memory");
                return result + (unsigned int)val;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result = val;
@@ -243,6 +231,114 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
                                                  (unsigned long)(v),   \
                                                  sizeof(*(addr))))
 
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void __uatomic_and(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; andb %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "iq" ((unsigned char)val)
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; andw %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned short)val)
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; andl %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned int)val)
+                       : "memory");
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; andq %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "er" ((unsigned long)val)
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define _uatomic_and(addr, v)                                             \
+       (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
+
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void __uatomic_or(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; orb %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "iq" ((unsigned char)val)
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; orw %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned short)val)
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; orl %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned int)val)
+                       : "memory");
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; orq %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "er" ((unsigned long)val)
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define _uatomic_or(addr, v)                                              \
+       (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
+
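
Taken together, the two primitives above give callers atomic
read-modify-write bit operations on naturally sized integers. A hedged
usage sketch; the variable and function names are hypothetical, and the
public uatomic_and()/uatomic_or() wrappers are assumed to resolve to the
_uatomic_* implementations above, as the compat section further down
indicates:

	static unsigned long flags;	/* word shared between threads */

	static void mark_ready(void)
	{
		uatomic_or(&flags, 1UL << 1);	/* atomically set bit 1 */
		uatomic_and(&flags, ~1UL);	/* atomically clear bit 0 */
	}
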
 /* uatomic_add */
 
 static inline __attribute__((always_inline))
@@ -276,7 +372,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
                        : "memory");
                return;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                __asm__ __volatile__(
@@ -331,7 +427,7 @@ void __uatomic_inc(void *addr, int len)
                        : "memory");
                return;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                __asm__ __volatile__(
@@ -384,7 +480,7 @@ void __uatomic_dec(void *addr, int len)
                        : "memory");
                return;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                __asm__ __volatile__(
@@ -404,7 +500,7 @@ void __uatomic_dec(void *addr, int len)
 
 #define _uatomic_dec(addr)     (__uatomic_dec((addr), sizeof(*(addr))))
 
-#if ((BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
 extern int __rcu_cas_avail;
 extern int __rcu_cas_init(void);
 
@@ -440,8 +536,22 @@ extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
                                                (unsigned long)(_new),         \
                                                sizeof(*(addr))))
 
-extern unsigned long _compat_uatomic_xchg(void *addr,
-                                         unsigned long _new, int len);
+extern unsigned long _compat_uatomic_and(void *addr,
+                                        unsigned long _new, int len);
+#define compat_uatomic_and(addr, v)                                   \
+       ((__typeof__(*(addr))) _compat_uatomic_and((addr),             \
+                                                  (unsigned long)(v), \
+                                                  sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_or(void *addr,
+                                       unsigned long _new, int len);
+#define compat_uatomic_or(addr, v)                                    \
+       ((__typeof__(*(addr))) _compat_uatomic_or((addr),              \
+                                                 (unsigned long)(v),  \
+                                                 sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_add_return(void *addr,
+                                               unsigned long _new, int len);
 #define compat_uatomic_add_return(addr, v)                                    \
        ((__typeof__(*(addr))) _compat_uatomic_add_return((addr),              \
                                                (unsigned long)(v),            \
@@ -466,6 +576,10 @@ extern unsigned long _compat_uatomic_xchg(void *addr,
                UATOMIC_COMPAT(cmpxchg(addr, old, _new))
 #define uatomic_xchg(addr, v)                  \
                UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_and(addr, v)           \
+               UATOMIC_COMPAT(and(addr, v))
+#define uatomic_or(addr, v)            \
+               UATOMIC_COMPAT(or(addr, v))
 #define uatomic_add_return(addr, v)            \
                UATOMIC_COMPAT(add_return(addr, v))
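
On 32-bit x86 builds with CONFIG_RCU_COMPAT_ARCH, these wrappers pick an
implementation at run time: the native lock-prefixed _uatomic_* path when
the CPU supports cmpxchg, the compat_uatomic_* fallback otherwise. A
simplified, hedged sketch of that dispatch; the real macro also calls
__rcu_cas_init() lazily to probe the CPU on first use:

	#define UATOMIC_COMPAT(insn)				\
		((__rcu_cas_avail > 0)				\
			? (_uatomic_##insn)			\
			: (compat_uatomic_##insn))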
 