};
#define __hp(x) ((struct __uatomic_dummy *)(x))
-#define _uatomic_set(addr, v) STORE_SHARED(*(addr), (v))
+#define _uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
/* cmpxchg */
: "memory");
return result;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result = old;
: "memory");
return result;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result;
: "memory");
return result + (unsigned int)val;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result = val;
(unsigned long)(v), \
sizeof(*(addr))))
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void __uatomic_and(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+	case 1:
+	{
+		__asm__ __volatile__(
+		"lock; andb %1, %0"
+			: "=m"(*__hp(addr))
+			: "iq" ((unsigned char)val)
+			: "memory");
+		return;
+	}
+	case 2:
+	{
+		__asm__ __volatile__(
+		"lock; andw %1, %0"
+			: "=m"(*__hp(addr))
+			: "ir" ((unsigned short)val)
+			: "memory");
+		return;
+	}
+	case 4:
+	{
+		__asm__ __volatile__(
+		"lock; andl %1, %0"
+			: "=m"(*__hp(addr))
+			: "ir" ((unsigned int)val)
+			: "memory");
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		__asm__ __volatile__(
+		"lock; andq %1, %0"
+			: "=m"(*__hp(addr))
+			: "er" ((unsigned long)val)
+			: "memory");
+		return;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__("ud2");
+	return;
+}
+
+#define _uatomic_and(addr, v)			\
+	(__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
+
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void __uatomic_or(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+	case 1:
+	{
+		__asm__ __volatile__(
+		"lock; orb %1, %0"
+			: "=m"(*__hp(addr))
+			: "iq" ((unsigned char)val)
+			: "memory");
+		return;
+	}
+	case 2:
+	{
+		__asm__ __volatile__(
+		"lock; orw %1, %0"
+			: "=m"(*__hp(addr))
+			: "ir" ((unsigned short)val)
+			: "memory");
+		return;
+	}
+	case 4:
+	{
+		__asm__ __volatile__(
+		"lock; orl %1, %0"
+			: "=m"(*__hp(addr))
+			: "ir" ((unsigned int)val)
+			: "memory");
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		__asm__ __volatile__(
+		"lock; orq %1, %0"
+			: "=m"(*__hp(addr))
+			: "er" ((unsigned long)val)
+			: "memory");
+		return;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__("ud2");
+	return;
+}
+
+#define _uatomic_or(addr, v)			\
+	(__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
+
/* uatomic_add */
static inline __attribute__((always_inline))
: "memory");
return;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__(
: "memory");
return;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__(
: "memory");
return;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__(
#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
-#if ((BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);
(unsigned long)(_new), \
sizeof(*(addr))))
-extern unsigned long _compat_uatomic_xchg(void *addr,
- unsigned long _new, int len);
+extern unsigned long _compat_uatomic_and(void *addr,
+					 unsigned long _new, int len);
+#define compat_uatomic_and(addr, v)					\
+	((__typeof__(*(addr))) _compat_uatomic_and((addr),		\
+						(unsigned long)(v),	\
+						sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_or(void *addr,
+					unsigned long _new, int len);
+#define compat_uatomic_or(addr, v)					\
+	((__typeof__(*(addr))) _compat_uatomic_or((addr),		\
+						(unsigned long)(v),	\
+						sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_add_return(void *addr,
+						unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v) \
((__typeof__(*(addr))) _compat_uatomic_add_return((addr), \
(unsigned long)(v), \
UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_and(addr, v)			\
+		UATOMIC_COMPAT(and(addr, v))
+#define uatomic_or(addr, v)			\
+		UATOMIC_COMPAT(or(addr, v))
#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))
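
For illustration only, and not part of the patch: a minimal sketch of how the new primitives are meant to be used once this header is in place. It assumes the public wrapper header (urcu/uatomic.h in current liburcu) exposes uatomic_and()/uatomic_or() alongside the existing uatomic_set()/uatomic_read(), as the UATOMIC_COMPAT() mappings above suggest; the flag constants and the program itself are invented for the example. The two calls below boil down to the "lock; or" / "lock; and" sequences added above, or to the compat slow path on pre-i486 CPUs when CONFIG_RCU_COMPAT_ARCH is defined.

#include <stdio.h>
#include <urcu/uatomic.h>

#define FLAG_READY	(1UL << 0)
#define FLAG_BUSY	(1UL << 1)

static unsigned long flags;

int main(void)
{
	uatomic_set(&flags, 0);

	/* Atomically set both bits in the shared word. */
	uatomic_or(&flags, FLAG_READY | FLAG_BUSY);

	/* Atomically clear FLAG_BUSY, leaving FLAG_READY set. */
	uatomic_and(&flags, ~FLAG_BUSY);

	printf("flags = %#lx\n", uatomic_read(&flags));	/* prints 0x1 */
	return 0;
}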