author    | Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> | 2007-10-15 17:00:12 +0200
committer | Ingo Molnar <mingo@elte.hu> | 2007-10-15 17:00:12 +0200
commit    | fad095a7b963d9e914e0cdb73e27355c47709441 (patch)
tree      | afb9056ce0416a48d3f62b1f1e85f3d5627de913 /kernel
parent    | fb615581c78efee25e4d04f1145e8fa8ec705dc3 (diff)
sched: group scheduler, fix bloat
A recent fix that made check_preempt_wakeup() check for preemption at higher levels of the group hierarchy caused a size bloat in !CONFIG_FAIR_GROUP_SCHED builds.

Fix the problem by moving the walk up the hierarchy behind is_same_group() and a new parent_entity() helper, both of which reduce to constants when group scheduling is disabled.
   text    data     bss     dec     hex filename
  42277   10598     320   53195    cfcb kernel/sched.o-before_this_patch
  42216   10598     320   53134    cf8e kernel/sched.o-after_this_patch
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
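The size win comes from the shape of the two helpers when group scheduling is compiled out. Below is a minimal userspace sketch (not kernel code; the simplified struct and the wrapper walk_to_common_group() are hypothetical) of why the new form shrinks the object: the !CONFIG_FAIR_GROUP_SCHED stub of is_same_group() is a constant 1, so the compiler can prove the loop never runs and emits no code for it, whereas the old open-coded for_each_sched_entity() walk with its #ifdef could not be folded away as cleanly.

/* Sketch only -- models the !CONFIG_FAIR_GROUP_SCHED stubs from this patch. */
#include <stddef.h>

struct sched_entity {
        struct sched_entity *parent;    /* never followed in this build */
};

/* Stubs, as in the !CONFIG_FAIR_GROUP_SCHED branch of the diff below. */
static inline int is_same_group(struct sched_entity *se,
                                struct sched_entity *pse)
{
        return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

/*
 * Hypothetical wrapper mirroring the caller pattern in
 * check_preempt_wakeup(): is_same_group() folds to 1, the loop
 * condition is constant-false, and the whole walk becomes dead code.
 */
static inline void walk_to_common_group(struct sched_entity **se,
                                        struct sched_entity **pse)
{
        while (!is_same_group(*se, *pse)) {
                *se = parent_entity(*se);
                *pse = parent_entity(*pse);
        }
}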
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched_fair.c | 43
1 file changed, 25 insertions, 18 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c44a295eee0e..57e7f3672fd7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -652,15 +652,21 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
-/* Do the two (enqueued) tasks belong to the same group ? */
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+/* Do the two (enqueued) entities belong to the same group ? */
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
-	if (curr->se.cfs_rq == p->se.cfs_rq)
+	if (se->cfs_rq == pse->cfs_rq)
 		return 1;
 
 	return 0;
 }
 
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+	return se->parent;
+}
+
 #else	/* CONFIG_FAIR_GROUP_SCHED */
 
 #define for_each_sched_entity(se) \
@@ -693,11 +699,17 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
 	return 1;
 }
 
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+	return NULL;
+}
+
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
 /*
@@ -787,8 +799,9 @@ static void yield_task_fair(struct rq *rq)
 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 {
 	struct task_struct *curr = rq->curr;
-	struct cfs_rq *cfs_rq = task_cfs_rq(curr), *pcfs_rq;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
+	s64 delta;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -797,21 +810,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		return;
 	}
 
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		pcfs_rq = cfs_rq_of(pse);
+	while (!is_same_group(se, pse)) {
+		se = parent_entity(se);
+		pse = parent_entity(pse);
+	}
 
-		if (cfs_rq == pcfs_rq) {
-			s64 delta = se->vruntime - pse->vruntime;
+	delta = se->vruntime - pse->vruntime;
 
-			if (delta > (s64)sysctl_sched_wakeup_granularity)
-				resched_task(curr);
-			break;
-		}
-#ifdef CONFIG_FAIR_GROUP_SCHED
-		pse = pse->parent;
-#endif
-	}
+	if (delta > (s64)sysctl_sched_wakeup_granularity)
+		resched_task(curr);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
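For completeness, here is how the same loop behaves when CONFIG_FAIR_GROUP_SCHED is enabled: the two entities climb their parent chains in lockstep until they are enqueued on the same cfs_rq, at which point their vruntimes are directly comparable. A small userspace model follows (simplified structs and illustrative values; like the patch, the lockstep walk assumes both entities start at the same depth in the hierarchy).

/* Userspace model -- simplified, not kernel code. */
#include <stdio.h>

struct cfs_rq {
        const char *name;
};

struct sched_entity {
        struct sched_entity *parent;
        struct cfs_rq *cfs_rq;
        long long vruntime;             /* the kernel uses u64/s64 */
};

static int is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        return se->cfs_rq == pse->cfs_rq;
}

static struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

int main(void)
{
        struct cfs_rq root = { "root" }, grp_a = { "A" }, grp_b = { "B" };

        /* Group entities for A and B, both enqueued on the root cfs_rq. */
        struct sched_entity ga = { NULL, &root, 100 };
        struct sched_entity gb = { NULL, &root, 250 };

        /* One task in each group: same depth, different groups. */
        struct sched_entity t1 = { &ga, &grp_a, 10 };
        struct sched_entity t2 = { &gb, &grp_b, 90 };

        struct sched_entity *se = &t1, *pse = &t2;

        while (!is_same_group(se, pse)) {       /* lockstep climb */
                se = parent_entity(se);
                pse = parent_entity(pse);
        }

        printf("common cfs_rq: %s, vruntime delta: %lld\n",
               se->cfs_rq->name, se->vruntime - pse->vruntime);
        return 0;
}

In the kernel, a delta greater than sysctl_sched_wakeup_granularity at the common level then triggers resched_task(curr); the model above just prints the delta.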