author    Jan Beulich <JBeulich@novell.com>      2005-10-30 14:59:27 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-10-30 17:37:11 -0800
commit    8896fab35e62aa748a5ce62ac773508e51f10be1 (patch)
tree      bb8f2dce7b5d0aa6ffe68aa6b97920d2ce32e164 /include/asm-i386
parent    dacb16b1a034fa7a0b868ee30758119fbfd90bc1 (diff)
[PATCH] x86: cmpxchg improvements
This adjusts i386's cmpxchg patterns so that:
- for word and long cmpxchges, the compiler can utilize all possible registers;
- cmpxchg8b gets disabled when the minimum specified hardware architecture
  doesn't support it (as was already happening for the byte, word, and long
  ones).
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
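The register-allocation half of the change is the constraint on the new-value
operand in the inline assembly: the old "q" constraint limits the operand to
the four byte-addressable registers (%eax, %ebx, %ecx, %edx), which is only
necessary for the one-byte cmpxchgb; for 16- and 32-bit operands any general
register will do, so "r" gives the compiler more freedom. A minimal user-space
sketch of the relaxed 32-bit case (hypothetical function name, assumes gcc on
32-bit x86; not the kernel's actual header):

/* Sketch only: 32-bit compare-and-exchange using the relaxed "r"
 * constraint this patch introduces.  Build with gcc -m32. */
static inline unsigned long
cmpxchg32_sketch(volatile unsigned long *ptr, unsigned long old,
                 unsigned long new)
{
	unsigned long prev;
	/* "r" lets the compiler pick any general register for `new`;
	 * "q" would have restricted it to %eax/%ebx/%ecx/%edx.  "0"
	 * ties `old` to the output register %eax, as cmpxchg requires. */
	__asm__ __volatile__("lock; cmpxchgl %1,%2"
			     : "=a" (prev)
			     : "r" (new), "m" (*ptr), "0" (old)
			     : "memory");
	return prev;
}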
Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/system.h  33
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index acd5c26b69b..97d52ac49e4 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -167,6 +167,8 @@ struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
+#ifdef CONFIG_X86_CMPXCHG64
+
/*
* The semantics of XCHGCMP8B are a bit strange, this is why
* there is a loop and the loading of %%eax and %%edx has to
@@ -221,6 +223,8 @@ static inline void __set_64bit_var (unsigned long long *ptr,
__set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
__set_64bit(ptr, ll_low(value), ll_high(value)) )
+#endif
+
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -259,7 +263,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
-#endif
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
@@ -275,13 +278,13 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 2:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
: "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
: "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
@@ -291,6 +294,30 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define cmpxchg(ptr,o,n)\
((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
(unsigned long)(n),sizeof(*(ptr))))
+
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+
+static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long prev;
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+ : "=A"(prev)
+ : "b"((unsigned long)new),
+ "c"((unsigned long)(new >> 32)),
+ "m"(*__xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+}
+
+#define cmpxchg64(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
+ (unsigned long long)(n)))
+
+#endif
#ifdef __KERNEL__
struct alt_instr {
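For reference, a hedged sketch of how a caller might use the cmpxchg64()
macro this patch adds: a lock-free 64-bit counter update, valid only when
CONFIG_X86_CMPXCHG64 is set (i.e. the minimum architecture provides
cmpxchg8b). The function name is hypothetical and not part of the patch:

/* Hypothetical caller of the new cmpxchg64(); requires
 * CONFIG_X86_CMPXCHG64, i.e. a CPU providing cmpxchg8b. */
static void counter64_add(unsigned long long *counter,
			  unsigned long long delta)
{
	unsigned long long old, new;

	do {
		old = *counter;        /* snapshot current value */
		new = old + delta;     /* value we want to install */
		/* cmpxchg64() returns the value found at *counter;
		 * if another CPU raced us, loop and try again. */
	} while (cmpxchg64(counter, old, new) != old);
}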