Diffstat (limited to 'patches.tizen/0350-arch-Make-__mutex_fastpath_lock_retval-return-whethe.patch')
-rw-r--r--   patches.tizen/0350-arch-Make-__mutex_fastpath_lock_retval-return-whethe.patch   315
1 file changed, 315 insertions, 0 deletions
diff --git a/patches.tizen/0350-arch-Make-__mutex_fastpath_lock_retval-return-whethe.patch b/patches.tizen/0350-arch-Make-__mutex_fastpath_lock_retval-return-whethe.patch
new file mode 100644
index 00000000000..4d46e625ec9
--- /dev/null
+++ b/patches.tizen/0350-arch-Make-__mutex_fastpath_lock_retval-return-whethe.patch
@@ -0,0 +1,315 @@
+From 89281e7aafd408556ced76de0e2b3fd5c5d78dd7 Mon Sep 17 00:00:00 2001
+From: Maarten Lankhorst <maarten.lankhorst@canonical.com>
+Date: Thu, 20 Jun 2013 13:31:05 +0200
+Subject: [PATCH 0350/1302] arch: Make __mutex_fastpath_lock_retval return
+ whether fastpath succeeded or not
+
+This will allow me to call functions that take multiple
+arguments if the fastpath fails. This is required to support
+ticket mutexes, because they need to be able to pass an extra
+argument to the fail function.
+
+Originally I duplicated the functions by adding
+__mutex_fastpath_lock_retval_arg. That ended up being just a
+duplicate of the existing function, so having the fastpath
+report whether it succeeded turned out to be the better approach.
+
+This also cleaned up the reservation mutex patch somewhat, by
+allowing an atomic_set instead of an atomic_xchg and by making
+it easier to detect whether the wrong unlock function was
+previously used.
+
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: dri-devel@lists.freedesktop.org
+Cc: linaro-mm-sig@lists.linaro.org
+Cc: robclark@gmail.com
+Cc: rostedt@goodmis.org
+Cc: daniel@ffwll.ch
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20130620113105.4001.83929.stgit@patser
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com>
+---
+ arch/ia64/include/asm/mutex.h | 10 ++++------
+ arch/powerpc/include/asm/mutex.h | 10 ++++------
+ arch/sh/include/asm/mutex-llsc.h | 4 ++--
+ arch/x86/include/asm/mutex_32.h | 11 ++++-------
+ arch/x86/include/asm/mutex_64.h | 11 ++++-------
+ include/asm-generic/mutex-dec.h | 10 ++++------
+ include/asm-generic/mutex-null.h | 2 +-
+ include/asm-generic/mutex-xchg.h | 10 ++++------
+ kernel/mutex.c | 32 ++++++++++++++------------------
+ 9 files changed, 41 insertions(+), 59 deletions(-)
+
+diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
+index bed73a6..f41e66d 100644
+--- a/arch/ia64/include/asm/mutex.h
++++ b/arch/ia64/include/asm/mutex.h
+@@ -29,17 +29,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+- * @fail_fn: function to call if the original value was not 1
+ *
+- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+- * or anything the slow path function returns.
++ * Change the count from 1 to a value lower than 1. This function returns 0
++ * if the fastpath succeeds, or -1 otherwise.
+ */
+ static inline int
+-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
++__mutex_fastpath_lock_retval(atomic_t *count)
+ {
+ if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+- return fail_fn(count);
++ return -1;
+ return 0;
+ }
+
+diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
+index 5399f7e..127ab23 100644
+--- a/arch/powerpc/include/asm/mutex.h
++++ b/arch/powerpc/include/asm/mutex.h
+@@ -82,17 +82,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+- * @fail_fn: function to call if the original value was not 1
+ *
+- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+- * or anything the slow path function returns.
++ * Change the count from 1 to a value lower than 1. This function returns 0
++ * if the fastpath succeeds, or -1 otherwise.
+ */
+ static inline int
+-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
++__mutex_fastpath_lock_retval(atomic_t *count)
+ {
+ if (unlikely(__mutex_dec_return_lock(count) < 0))
+- return fail_fn(count);
++ return -1;
+ return 0;
+ }
+
+diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
+index 090358a..dad29b6 100644
+--- a/arch/sh/include/asm/mutex-llsc.h
++++ b/arch/sh/include/asm/mutex-llsc.h
+@@ -37,7 +37,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+ }
+
+ static inline int
+-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
++__mutex_fastpath_lock_retval(atomic_t *count)
+ {
+ int __done, __res;
+
+@@ -51,7 +51,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+ : "t");
+
+ if (unlikely(!__done || __res != 0))
+- __res = fail_fn(count);
++ __res = -1;
+
+ return __res;
+ }
+diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
+index 03f90c8..0208c3c 100644
+--- a/arch/x86/include/asm/mutex_32.h
++++ b/arch/x86/include/asm/mutex_32.h
+@@ -42,17 +42,14 @@ do { \
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+- * @fail_fn: function to call if the original value was not 1
+ *
+- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+- * or anything the slow path function returns
++ * Change the count from 1 to a value lower than 1. This function returns 0
++ * if the fastpath succeeds, or -1 otherwise.
+ */
+-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+- int (*fail_fn)(atomic_t *))
++static inline int __mutex_fastpath_lock_retval(atomic_t *count)
+ {
+ if (unlikely(atomic_dec_return(count) < 0))
+- return fail_fn(count);
++ return -1;
+ else
+ return 0;
+ }
+diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
+index 68a87b0..2c543ff 100644
+--- a/arch/x86/include/asm/mutex_64.h
++++ b/arch/x86/include/asm/mutex_64.h
+@@ -37,17 +37,14 @@ do { \
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+- * @fail_fn: function to call if the original value was not 1
+ *
+- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+- * or anything the slow path function returns
++ * Change the count from 1 to a value lower than 1. This function returns 0
++ * if the fastpath succeeds, or -1 otherwise.
+ */
+-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+- int (*fail_fn)(atomic_t *))
++static inline int __mutex_fastpath_lock_retval(atomic_t *count)
+ {
+ if (unlikely(atomic_dec_return(count) < 0))
+- return fail_fn(count);
++ return -1;
+ else
+ return 0;
+ }
+diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
+index f104af7..d4f9fb4 100644
+--- a/include/asm-generic/mutex-dec.h
++++ b/include/asm-generic/mutex-dec.h
+@@ -28,17 +28,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+- * @fail_fn: function to call if the original value was not 1
+ *
+- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+- * or anything the slow path function returns.
++ * Change the count from 1 to a value lower than 1. This function returns 0
++ * if the fastpath succeeds, or -1 otherwise.
+ */
+ static inline int
+-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
++__mutex_fastpath_lock_retval(atomic_t *count)
+ {
+ if (unlikely(atomic_dec_return(count) < 0))
+- return fail_fn(count);
++ return -1;
+ return 0;
+ }
+
+diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
+index e1bbbc7..61069ed 100644
+--- a/include/asm-generic/mutex-null.h
++++ b/include/asm-generic/mutex-null.h
+@@ -11,7 +11,7 @@
+ #define _ASM_GENERIC_MUTEX_NULL_H
+
+ #define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
+-#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count)
++#define __mutex_fastpath_lock_retval(count) (-1)
+ #define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
+ #define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
+ #define __mutex_slowpath_needs_to_unlock() 1
+diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
+index c04e0db..f169ec0 100644
+--- a/include/asm-generic/mutex-xchg.h
++++ b/include/asm-generic/mutex-xchg.h
+@@ -39,18 +39,16 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+- * @fail_fn: function to call if the original value was not 1
+ *
+- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+- * or anything the slow path function returns
++ * Change the count from 1 to a value lower than 1. This function returns 0
++ * if the fastpath succeeds, or -1 otherwise.
+ */
+ static inline int
+-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
++__mutex_fastpath_lock_retval(atomic_t *count)
+ {
+ if (unlikely(atomic_xchg(count, 0) != 1))
+ if (likely(atomic_xchg(count, -1) != 1))
+- return fail_fn(count);
++ return -1;
+ return 0;
+ }
+
+diff --git a/kernel/mutex.c b/kernel/mutex.c
+index ad53a66..42f8dda 100644
+--- a/kernel/mutex.c
++++ b/kernel/mutex.c
+@@ -494,10 +494,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
+ * mutex_lock_interruptible() and mutex_trylock().
+ */
+ static noinline int __sched
+-__mutex_lock_killable_slowpath(atomic_t *lock_count);
++__mutex_lock_killable_slowpath(struct mutex *lock);
+
+ static noinline int __sched
+-__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
++__mutex_lock_interruptible_slowpath(struct mutex *lock);
+
+ /**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
+@@ -515,12 +515,12 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
+ int ret;
+
+ might_sleep();
+- ret = __mutex_fastpath_lock_retval
+- (&lock->count, __mutex_lock_interruptible_slowpath);
+- if (!ret)
++ ret = __mutex_fastpath_lock_retval(&lock->count);
++ if (likely(!ret)) {
+ mutex_set_owner(lock);
+-
+- return ret;
++ return 0;
++ } else
++ return __mutex_lock_interruptible_slowpath(lock);
+ }
+
+ EXPORT_SYMBOL(mutex_lock_interruptible);
+@@ -530,12 +530,12 @@ int __sched mutex_lock_killable(struct mutex *lock)
+ int ret;
+
+ might_sleep();
+- ret = __mutex_fastpath_lock_retval
+- (&lock->count, __mutex_lock_killable_slowpath);
+- if (!ret)
++ ret = __mutex_fastpath_lock_retval(&lock->count);
++ if (likely(!ret)) {
+ mutex_set_owner(lock);
+-
+- return ret;
++ return 0;
++ } else
++ return __mutex_lock_killable_slowpath(lock);
+ }
+ EXPORT_SYMBOL(mutex_lock_killable);
+
+@@ -548,18 +548,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
+ }
+
+ static noinline int __sched
+-__mutex_lock_killable_slowpath(atomic_t *lock_count)
++__mutex_lock_killable_slowpath(struct mutex *lock)
+ {
+- struct mutex *lock = container_of(lock_count, struct mutex, count);
+-
+ return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
+ }
+
+ static noinline int __sched
+-__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
++__mutex_lock_interruptible_slowpath(struct mutex *lock)
+ {
+- struct mutex *lock = container_of(lock_count, struct mutex, count);
+-
+ return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
+ }
+ #endif
+--
+1.8.3.2
+
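
A minimal user-space sketch of the calling convention this patch introduces: the fastpath helper returns 0 on success or -1 on contention instead of invoking a fail_fn callback, so the caller is free to pass whatever arguments its own slowpath needs. This is only an illustration, not kernel code; the demo_* names are hypothetical and the kernel's atomic_t is modelled with C11 <stdatomic.h>.

/*
 * Sketch of the new __mutex_fastpath_lock_retval() convention,
 * built on C11 atomics instead of the kernel's atomic_t.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_mutex {
	atomic_int count;	/* 1: unlocked, 0: locked, <0: locked with waiters */
};

/* New-style fastpath: decrement the count and report whether we got the lock. */
static int demo_fastpath_lock_retval(atomic_int *count)
{
	/* atomic_fetch_sub() returns the old value; old - 1 is the new count. */
	if (atomic_fetch_sub(count, 1) - 1 < 0)
		return -1;	/* contended: caller picks which slowpath to run */
	return 0;
}

/* The slowpath now takes the mutex itself, so it could take extra arguments too. */
static int demo_lock_slowpath(struct demo_mutex *lock)
{
	/* A real implementation would block; here we just report the contention. */
	printf("slowpath taken, count=%d\n", atomic_load(&lock->count));
	return 0;
}

static int demo_lock_interruptible(struct demo_mutex *lock)
{
	int ret = demo_fastpath_lock_retval(&lock->count);

	if (ret == 0)
		return 0;			/* fastpath succeeded */
	return demo_lock_slowpath(lock);	/* fastpath failed, fall back */
}

int main(void)
{
	struct demo_mutex m = { .count = 1 };

	demo_lock_interruptible(&m);	/* uncontended: fastpath */
	demo_lock_interruptible(&m);	/* already held: slowpath */
	return 0;
}

Returning a plain -1 also lets the no-op mutex-null.h variant collapse to a constant, and it frees the slowpath from the int (*)(atomic_t *) callback signature: it can take the struct mutex directly, or additional arguments, which is exactly what the ticket/reservation mutex work needs.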