sched/fair: Fix fault in reweight_entity

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4230fc1d40b0bfd108ac03269ec616a0fe7cb0b6..4959ed985c301c7edad4850d0b64c27aa4b3a44d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1199,8 +1199,9 @@ int tg_nop(struct task_group *tg, void *data)
 }
 #endif
 
-static void set_load_weight(struct task_struct *p, bool update_load)
+static void set_load_weight(struct task_struct *p)
 {
+       bool update_load = !(READ_ONCE(p->__state) & TASK_NEW);
        int prio = p->static_prio - MAX_RT_PRIO;
        struct load_weight *load = &p->se.load;
 
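
This hunk moves the update_load decision into set_load_weight() itself: instead of each caller passing a flag, the function derives it from whether the task is still marked TASK_NEW. For context, a minimal sketch of how that flag is consumed further down, assuming the usual structure of set_load_weight() in this kernel series (the rest of the function is not part of this hunk):

static void set_load_weight(struct task_struct *p)
{
	bool update_load = !(READ_ONCE(p->__state) & TASK_NEW);
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/* SCHED_IDLE tasks keep a fixed minimal weight. */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * Only a task that is past fork (no TASK_NEW) and already on the
	 * fair class goes through reweight_task(); a new task just gets its
	 * static weight, so reweight_entity() is never reached for it.
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}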
@@ -4358,7 +4359,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
                        p->static_prio = NICE_TO_PRIO(0);
 
                p->prio = p->normal_prio = p->static_prio;
-               set_load_weight(p, false);
+               set_load_weight(p);
 
                /*
                 * We don't need the reset flag anymore after the fork. It has
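
In the fork path the old call passed false explicitly. With the flag now derived inside set_load_weight(), the same result falls out of the task state, since the child is still marked TASK_NEW at this point (an ordering assumed from sched_fork() in this series, not shown in the hunk):

/* Sketch of the assumed fork-path ordering, not part of the patch: */
p->__state = TASK_NEW;	/* child marked new early in sched_fork()        */
...
set_load_weight(p);	/* TASK_NEW still set, so update_load evaluates  */
			/* to false, matching the old explicit argument  */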
@@ -6902,7 +6903,7 @@ void set_user_nice(struct task_struct *p, long nice)
                put_prev_task(rq, p);
 
        p->static_prio = NICE_TO_PRIO(nice);
-       set_load_weight(p, true);
+       set_load_weight(p);
        old_prio = p->prio;
        p->prio = effective_prio(p);
 
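
For set_user_nice() (and the __setscheduler_params() hunk below) the old call passed true. Any task that has finished forking still yields true from the derived check, so behaviour there is unchanged; the difference is a task that is still TASK_NEW, which now takes the static-weight path instead of reweight_task(). Per the subject line, that is presumably the window that faulted in reweight_entity(), e.g. a nice change racing with fork. A hypothetical sequence, not taken from the patch:

/* Hypothetical race this guards against (assumption drawn from the subject): */
sched_fork(clone_flags, p);	/* child still TASK_NEW                        */
set_user_nice(p, nice);		/* e.g. via setpriority() from another thread  */
    set_load_weight(p);		/* TASK_NEW set -> reweight_task() skipped, so */
				/* reweight_entity() never sees the new task   */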
@@ -7194,7 +7195,7 @@ static void __setscheduler_params(struct task_struct *p,
         */
        p->rt_priority = attr->sched_priority;
        p->normal_prio = normal_prio(p);
-       set_load_weight(p, true);
+       set_load_weight(p);
 }
 
 /*
@@ -9432,7 +9433,7 @@ void __init sched_init(void)
 #endif
        }
 
-       set_load_weight(&init_task, false);
+       set_load_weight(&init_task);
 
        /*
         * The boot idle thread does lazy MMU switching as well: