summaryrefslogtreecommitdiff
path: root/include/asm-generic
diff options
context:
space:
mode:
author: Peter Zijlstra <peterz@infradead.org> 2013-08-14 14:55:46 +0200
committer: Ingo Molnar <mingo@kernel.org> 2013-09-25 14:07:52 +0200
commit: 01028747559ac6c6f642a7bbd2875cc4f66b2feb (patch)
tree: 3672a4ca3409cae40d5c7338ad9bfa590045dcbc /include/asm-generic
parent: a787870924dbd6f321661e06d4ec1c7a408c9ccf (diff)
download: kernel-common-01028747559ac6c6f642a7bbd2875cc4f66b2feb.tar.gz
          kernel-common-01028747559ac6c6f642a7bbd2875cc4f66b2feb.tar.bz2
          kernel-common-01028747559ac6c6f642a7bbd2875cc4f66b2feb.zip
sched: Create more preempt_count accessors
We need a few special preempt_count accessors:

 - task_preempt_count() for when we're interested in the preemption
   count of another (non-running) task.

 - init_task_preempt_count() for properly initializing the preemption
   count.

 - init_idle_preempt_count() a special case of the above for the idle
   threads.

With these no generic code ever touches thread_info::preempt_count
anymore and architectures could choose to remove it.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-jf5swrio8l78j37d06fzmo4r@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/asm-generic')
-rw-r--r--include/asm-generic/preempt.h14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index a1fc6590a743..8100b1ec1715 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -28,6 +28,20 @@ static __always_inline void preempt_count_set(int pc)
}
/*
+ * must be macros to avoid header recursion hell
+ */
+#define task_preempt_count(p) \
+ (task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)
+
+#define init_task_preempt_count(p) do { \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+/*
* We fold the NEED_RESCHED bit into the preempt count such that
* preempt_enable() can decrement and test for needing to reschedule with a
* single instruction.