git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
kernel/sched/: remove caller signal_pending branch predictions
author Davidlohr Bueso <dave@stgolabs.net>
Thu, 3 Jan 2019 23:28:48 +0000 (15:28 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 4 Jan 2019 21:13:48 +0000 (13:13 -0800)
This is already done for us internally by the signal machinery.
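For reference, the branch hint already lives inside the signal helpers themselves: by this point signal_pending() wraps its pending-signal (TIF_SIGPENDING) test in unlikely(), and signal_pending_state() is built on top of it, so a second unlikely() at the call sites below adds nothing. The following is a minimal, user-space sketch of that pattern; the helper bodies are simplified stand-ins, not the kernel's verbatim code, and likely()/unlikely() are defined here via __builtin_expect() just as the kernel macros expand to:

/* sketch.c - the callee carries the branch hint; callers do not repeat it */
#include <stdbool.h>
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Simplified stand-in for the kernel helper: the hint is applied here, once. */
static inline bool signal_pending(bool tif_sigpending)
{
	return unlikely(tif_sigpending);
}

/* Caller after this patch: a plain if(), no caller-side unlikely(). */
static void schedule_like_path(bool tif_sigpending)
{
	if (signal_pending(tif_sigpending))
		puts("signal pending: leave the task TASK_RUNNING");
	else
		puts("no signal: deactivate the task and switch away");
}

int main(void)
{
	schedule_like_path(false);	/* common case: no signal pending */
	schedule_like_path(true);	/* rare case: signal pending */
	return 0;
}

Because signal_pending() is an inline helper, the __builtin_expect() hint propagates into the caller's branch once it is inlined, which is why dropping the caller-side annotation loses nothing.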

Link: http://lkml.kernel.org/r/20181116002713.8474-3-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
kernel/sched/core.c
kernel/sched/swait.c
kernel/sched/wait.c

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f6692017337032f4cc69f684d1a5039780b9fd34..17a954c9e15377adf4e97a53c8760a6a03797ca0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3416,7 +3416,7 @@ static void __sched notrace __schedule(bool preempt)
 
        switch_count = &prev->nivcsw;
        if (!preempt && prev->state) {
-               if (unlikely(signal_pending_state(prev->state, prev))) {
+               if (signal_pending_state(prev->state, prev)) {
                        prev->state = TASK_RUNNING;
                } else {
                        deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
index 66b59ac77c2209fd9fccd92c8ae0c660e7428e4d..e83a3f8449f653475f27e2770e9b55f21223cd4c 100644
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -93,7 +93,7 @@ long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait
        long ret = 0;
 
        raw_spin_lock_irqsave(&q->lock, flags);
-       if (unlikely(signal_pending_state(state, current))) {
+       if (signal_pending_state(state, current)) {
                /*
                 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
                 * must not see us.
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 5dd47f1103d18bd769cace68b753088574e02f27..6eb1f8efd221c5fd9b590014e5e51cb0e479356a 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -264,7 +264,7 @@ long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_en
        long ret = 0;
 
        spin_lock_irqsave(&wq_head->lock, flags);
-       if (unlikely(signal_pending_state(state, current))) {
+       if (signal_pending_state(state, current)) {
                /*
                 * Exclusive waiter must not fail if it was selected by wakeup,
                 * it should "consume" the condition we were waiting for.