author		Ingo Molnar <mingo@elte.hu>	2008-01-25 21:08:33 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 21:08:33 +0100
commit		6478d8800b75253b2a934ddcb734e13ade023ad0
tree		df4017269b8755735578445c0a8a9e8b3b2615e9	/include/linux
parent		58b8a73ab8becfcaea84abc2a06038281efa4c8a
sched: remove the !PREEMPT_BKL code
remove the !PREEMPT_BKL code.
this removes 160 lines of legacy code.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/hardirq.h		 6
-rw-r--r--	include/linux/smp_lock.h	14
2 files changed, 2 insertions, 18 deletions
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8d302298a16..2961ec78804 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -72,11 +72,7 @@
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
 
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-#endif
+#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 
 #ifdef CONFIG_PREEMPT
 # define PREEMPT_CHECK_OFFSET 1
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 58962c51dee..aab3a4cff4e 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -17,22 +17,10 @@ extern void __lockfunc __release_kernel_lock(void);
 		__release_kernel_lock();	\
 } while (0)
 
-/*
- * Non-SMP kernels will never block on the kernel lock,
- * so we are better off returning a constant zero from
- * reacquire_kernel_lock() so that the compiler can see
- * it at compile-time.
- */
-#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
-# define return_value_on_smp return
-#else
-# define return_value_on_smp
-#endif
-
 static inline int reacquire_kernel_lock(struct task_struct *task)
 {
 	if (unlikely(task->lock_depth >= 0))
-		return_value_on_smp __reacquire_kernel_lock();
+		return __reacquire_kernel_lock();
 	return 0;
 }
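
Note on the hardirq.h hunk: with the !PREEMPT_BKL variant gone, in_atomic() reduces to a single, configuration-independent test on the preemption counter and no longer consults kernel_locked(). The standalone C sketch below models that test in userspace; the PREEMPT_ACTIVE value and the fake counter are illustrative stand-ins, not the kernel's actual per-architecture definitions.

#include <stdio.h>

/* Illustrative stand-ins; the real values live in per-arch kernel headers. */
#define PREEMPT_ACTIVE	0x10000000
static unsigned int fake_preempt_count;	/* models preempt_count() */

/* Post-commit form: any non-zero count, ignoring the PREEMPT_ACTIVE
 * reschedule marker, means we are in an atomic context. */
#define in_atomic()	((fake_preempt_count & ~PREEMPT_ACTIVE) != 0)

int main(void)
{
	fake_preempt_count = 0;			/* plain task context */
	printf("task context:   in_atomic() = %d\n", in_atomic());

	fake_preempt_count = 1;			/* e.g. after preempt_disable() */
	printf("preempt off:    in_atomic() = %d\n", in_atomic());

	fake_preempt_count = PREEMPT_ACTIVE;	/* only the reschedule marker set */
	printf("PREEMPT_ACTIVE: in_atomic() = %d\n", in_atomic());
	return 0;
}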