From 916671c08b7808aebec87cc56c85788e665b3c6b Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 22 Nov 2011 15:21:26 +0100 Subject: [PATCH] sched: Set skip_clock_update in yield_task_fair() This is another case where we are on our way to schedule(), so can save a useless clock update and resulting microscopic vruntime update. Signed-off-by: Mike Galbraith Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1321971686.6855.18.camel@marge.simson.net Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 7 +++++++ kernel/sched/fair.c | 6 ++++++ 2 files changed, 13 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ca8fd44145ac..db313c33af29 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4547,6 +4547,13 @@ again: */ if (preempt && rq != p_rq) resched_task(p_rq->curr); + } else { + /* + * We might have set it in yield_task_fair(), but are + * not going to schedule(), so don't want to skip + * the next update. + */ + rq->skip_clock_update = 0; } out: diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8e534a05e3ed..81ccb811afb4 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3075,6 +3075,12 @@ static void yield_task_fair(struct rq *rq) * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); + /* + * Tell update_rq_clock() that we've just updated, + * so we don't do microscopic update in schedule() + * and double the fastpath cost. + */ + rq->skip_clock_update = 1; } set_skip_buddy(se); -- 2.39.5