sched/core: Avoid obvious double update_rq_clock()
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Tue, 21 Feb 2017 13:47:02 +0000 (14:47 +0100)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 16 Mar 2017 08:46:25 +0000 (09:46 +0100)
Add DEQUEUE_NOCLOCK (or ENQUEUE_NOCLOCK) to all places that have already
done an update_rq_clock().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
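
For context: a companion change in the same series taught dequeue_task() and
enqueue_task() to skip their internal update_rq_clock() when the *_NOCLOCK
flag is set, so a caller that has already updated the clock under rq->lock
does not update it a second time. The stand-alone sketch below (simplified,
with hypothetical flag values and a toy struct rq, not the kernel's actual
code) illustrates the pattern the hunks that follow apply at each call site:

/*
 * Minimal sketch of the NOCLOCK pattern (hypothetical, simplified;
 * not the kernel's real definitions). Callers that have already done
 * update_rq_clock() under rq->lock pass DEQUEUE_NOCLOCK so the
 * dequeue path skips a second, redundant update.
 */
#include <stdio.h>

#define DEQUEUE_SAVE    0x02    /* assumed flag value for the sketch */
#define DEQUEUE_NOCLOCK 0x08    /* skip update_rq_clock() in dequeue */

struct rq {
	unsigned long long clock;   /* stand-in for the rq clock */
	int updates;                /* counts updates to show the saving */
};

static void update_rq_clock(struct rq *rq)
{
	rq->clock++;        /* the real code reads the sched clock here */
	rq->updates++;
}

static void dequeue_task(struct rq *rq, int flags)
{
	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);
	/* ... the sched class dequeue would run here ... */
}

int main(void)
{
	struct rq rq = { 0, 0 };

	/* Before the patch: explicit update plus one inside dequeue. */
	update_rq_clock(&rq);
	dequeue_task(&rq, DEQUEUE_SAVE);

	/* After the patch: the explicit update is the only one. */
	update_rq_clock(&rq);
	dequeue_task(&rq, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);

	printf("updates: %d (would be 4 if neither call passed NOCLOCK)\n",
	       rq.updates);
	return 0;
}

Making the skip an explicit flag keeps the "clock is updated exactly once
per rq->lock section" invariant visible at each call site, instead of having
dequeue_task() try to guess whether the clock is still fresh.
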
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 179a6c928bf1af6e7f5d430d38cce1e1412c84fe..c6be770d6e68d944d872708b1aa656a124a5c42e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1062,7 +1062,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
                 * holding rq->lock.
                 */
                lockdep_assert_held(&rq->lock);
-               dequeue_task(rq, p, DEQUEUE_SAVE);
+               dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
        }
        if (running)
                put_prev_task(rq, p);
@@ -2555,7 +2555,7 @@ void wake_up_new_task(struct task_struct *p)
        update_rq_clock(rq);
        post_init_entity_util_avg(&p->se);
 
-       activate_task(rq, p, 0);
+       activate_task(rq, p, ENQUEUE_NOCLOCK);
        p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
@@ -3683,7 +3683,8 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-       int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
+       int oldprio, queued, running, queue_flag =
+               DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
        const struct sched_class *prev_class;
        struct rq_flags rf;
        struct rq *rq;
@@ -3804,7 +3805,7 @@ void set_user_nice(struct task_struct *p, long nice)
        queued = task_on_rq_queued(p);
        running = task_current(rq, p);
        if (queued)
-               dequeue_task(rq, p, DEQUEUE_SAVE);
+               dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
        if (running)
                put_prev_task(rq, p);
 
@@ -4125,7 +4126,7 @@ static int __sched_setscheduler(struct task_struct *p,
        const struct sched_class *prev_class;
        struct rq_flags rf;
        int reset_on_fork;
-       int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
+       int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
        struct rq *rq;
 
        /* May grab non-irq protected spin_locks: */
@@ -6413,7 +6414,8 @@ static void sched_change_group(struct task_struct *tsk, int type)
  */
 void sched_move_task(struct task_struct *tsk)
 {
-       int queued, running;
+       int queued, running, queue_flags =
+               DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
        struct rq_flags rf;
        struct rq *rq;
 
@@ -6424,14 +6426,14 @@ void sched_move_task(struct task_struct *tsk)
        queued = task_on_rq_queued(tsk);
 
        if (queued)
-               dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
+               dequeue_task(rq, tsk, queue_flags);
        if (running)
                put_prev_task(rq, tsk);
 
        sched_change_group(tsk, TASK_MOVE_GROUP);
 
        if (queued)
-               enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE | ENQUEUE_NOCLOCK);
+               enqueue_task(rq, tsk, queue_flags);
        if (running)
                set_curr_task(rq, tsk);