Diffstat (limited to 'include/asm-x86/spinlock.h')
-rw-r--r--  include/asm-x86/spinlock.h  |  79
1 file changed, 37 insertions(+), 42 deletions(-)
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index e39c790dbfd..157ff7fab97 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -1,5 +1,5 @@
-#ifndef _X86_SPINLOCK_H_
-#define _X86_SPINLOCK_H_
+#ifndef ASM_X86__SPINLOCK_H
+#define ASM_X86__SPINLOCK_H
#include <asm/atomic.h>
#include <asm/rwlock.h>
@@ -21,8 +21,10 @@
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
+# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
+# define REG_PTR_MODE "q"
#endif
#if defined(CONFIG_X86_32) && \
@@ -54,19 +56,7 @@
* much between them in performance though, especially as locks are out of line.
*/
#if (NR_CPUS < 256)
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
-{
- int tmp = ACCESS_ONCE(lock->slock);
-
- return (((tmp >> 8) & 0xff) != (tmp & 0xff));
-}
-
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
-{
- int tmp = ACCESS_ONCE(lock->slock);
-
- return (((tmp >> 8) - tmp) & 0xff) > 1;
-}
+#define TICKET_SHIFT 8
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
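As context for the two ticket-lock variants this file selects between: with NR_CPUS < 256 the whole lock fits in 16 bits, the low byte holding the ticket currently being served and the high byte the next ticket to hand out, which is why TICKET_SHIFT is 8 here. A hedged plain-C sketch of that acquire path (names such as sketch_lock are hypothetical, not kernel code):

#include <stdatomic.h>

struct sketch_lock { _Atomic unsigned short slock; };	/* hypothetical stand-in */

static void sketch_lock_acquire(struct sketch_lock *l)
{
	/* "lock; xaddw": atomically claim the next ticket from the high byte. */
	unsigned short old = atomic_fetch_add(&l->slock, 1u << 8);
	unsigned char my_ticket = old >> 8;

	/* Spin until the owner byte (low 8 bits) reaches our ticket. */
	while ((unsigned char)(atomic_load(&l->slock) & 0xff) != my_ticket)
		;	/* the real code inserts "rep; nop" (pause) here */
}

Fairness falls out of the FIFO ticket order: each waiter spins waiting for its own ticket number rather than racing on a single flag.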
@@ -89,19 +79,17 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
- int tmp;
- short new;
+ int tmp, new;
- asm volatile("movw %2,%w0\n\t"
+ asm volatile("movzwl %2, %0\n\t"
"cmpb %h0,%b0\n\t"
+ "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
"jne 1f\n\t"
- "movw %w0,%w1\n\t"
- "incb %h1\n\t"
- "lock ; cmpxchgw %w1,%2\n\t"
+ LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
"1:"
"sete %b1\n\t"
"movzbl %b1,%0\n\t"
- : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+ : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
:
: "memory", "cc");
@@ -116,26 +104,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
: "memory", "cc");
}
#else
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
-{
- int tmp = ACCESS_ONCE(lock->slock);
-
- return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
-}
-
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
-{
- int tmp = ACCESS_ONCE(lock->slock);
-
- return (((tmp >> 16) - tmp) & 0xffff) > 1;
-}
+#define TICKET_SHIFT 16
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
int inc = 0x00010000;
int tmp;
- asm volatile("lock ; xaddl %0, %1\n"
+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
"movzwl %w0, %2\n\t"
"shrl $16, %0\n\t"
"1:\t"
@@ -146,7 +122,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
/* don't need lfence here, because loads are in-order */
"jmp 1b\n"
"2:"
- : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+ : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
:
: "memory", "cc");
}
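The NR_CPUS >= 256 variant above is the same algorithm with two 16-bit ticket fields packed into a 32-bit word (TICKET_SHIFT 16): xaddl claims a ticket, shrl $16 extracts it, and the loop rereads the owner half until it matches. A hedged C rendering, again with hypothetical names and C11 <stdatomic.h>:

static void sketch_lock_acquire32(_Atomic unsigned int *slock)
{
	/* lock; xaddl with 0x00010000: bump the next-ticket half, keep the old word. */
	unsigned int old = atomic_fetch_add(slock, 0x00010000u);
	unsigned short my_ticket = old >> 16;			/* shrl $16 */

	/* Reread the owner half from memory until it equals our ticket. */
	while ((unsigned short)(atomic_load(slock) & 0xffff) != my_ticket)
		;						/* rep; nop */
}

The constraint change ("+Q" to "+r", "=r" to "=&r") relaxes which register inc may occupy and marks tmp as early-clobber, since it is written before the asm has finished reading its other operands.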
@@ -160,13 +136,13 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
"movl %0,%1\n\t"
"roll $16, %0\n\t"
"cmpl %0,%1\n\t"
+ "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
"jne 1f\n\t"
- "addl $0x00010000, %1\n\t"
- "lock ; cmpxchgl %1,%2\n\t"
+ LOCK_PREFIX "cmpxchgl %1,%2\n\t"
"1:"
"sete %b1\n\t"
"movzbl %b1,%0\n\t"
- : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+ : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
:
: "memory", "cc");
@@ -182,7 +158,19 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
}
#endif
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+{
+ int tmp = ACCESS_ONCE(lock->slock);
+
+ return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
+}
+
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+{
+ int tmp = ACCESS_ONCE(lock->slock);
+
+ return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
+}
#ifdef CONFIG_PARAVIRT
/*
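With TICKET_SHIFT defined per variant, the previously duplicated __ticket_spin_is_locked/__ticket_spin_is_contended helpers collapse into the single pair added in the hunk above: the lock is held whenever the next-ticket and owner fields differ, and contended when more than one ticket separates them. A hedged worked example for the byte case (TICKET_SHIFT assumed to be 8):

static void sketch_example(void)
{
	int tmp = 0x0503;					/* next ticket 5, owner 3 */
	int locked    = !!(((tmp >> 8) ^ tmp) & 0xff);		/* 1: the fields differ */
	int contended = ((((tmp >> 8) - tmp) & 0xff) > 1);	/* 1: difference 2 = holder plus a waiter */
	(void)locked; (void)contended;
}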
@@ -272,6 +260,13 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+ unsigned long flags)
+{
+ __raw_spin_lock(lock);
+}
+
#endif /* CONFIG_PARAVIRT */
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
@@ -366,4 +361,4 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
-#endif
+#endif /* ASM_X86__SPINLOCK_H */