ftrace: add tracing_cpumask

diff --git a/kernel/sched.c b/kernel/sched.c
index 561b3b39bdb873d0dcc56c760752155b7fa106a4..9ca4a2e6a236ffd98749771338384839cdefab28 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
 #include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
+#include <linux/ftrace.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- * This is default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
-{
-       return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
-}
-
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -321,7 +312,13 @@ static DEFINE_SPINLOCK(task_group_lock);
 # define INIT_TASK_GROUP_LOAD  NICE_0_LOAD
 #endif
 
+/*
+ * A weight of 0, 1 or ULONG_MAX can cause arithmetic problems.
+ * (The default weight is 1024 - so there's no practical
+ *  limitation from this.)
+ */
 #define MIN_SHARES     2
+#define MAX_SHARES     (ULONG_MAX - 1)
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 #endif
@@ -551,13 +548,7 @@ struct rq {
        unsigned long next_balance;
        struct mm_struct *prev_mm;
 
-       u64 clock, prev_clock_raw;
-       s64 clock_max_delta;
-
-       unsigned int clock_warps, clock_overflows, clock_underflows;
-       u64 idle_clock;
-       unsigned int clock_deep_idle_events;
-       u64 tick_timestamp;
+       u64 clock;
 
        atomic_t nr_iowait;
 
@@ -622,82 +613,6 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
-#ifdef CONFIG_NO_HZ
-static inline bool nohz_on(int cpu)
-{
-       return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
-}
-
-static inline u64 max_skipped_ticks(struct rq *rq)
-{
-       return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
-}
-
-static inline void update_last_tick_seen(struct rq *rq)
-{
-       rq->last_tick_seen = jiffies;
-}
-#else
-static inline u64 max_skipped_ticks(struct rq *rq)
-{
-       return 1;
-}
-
-static inline void update_last_tick_seen(struct rq *rq)
-{
-}
-#endif
-
-/*
- * Update the per-runqueue clock, as finegrained as the platform can give
- * us, but without assuming monotonicity, etc.:
- */
-static void __update_rq_clock(struct rq *rq)
-{
-       u64 prev_raw = rq->prev_clock_raw;
-       u64 now = sched_clock();
-       s64 delta = now - prev_raw;
-       u64 clock = rq->clock;
-
-#ifdef CONFIG_SCHED_DEBUG
-       WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-#endif
-       /*
-        * Protect against sched_clock() occasionally going backwards:
-        */
-       if (unlikely(delta < 0)) {
-               clock++;
-               rq->clock_warps++;
-       } else {
-               /*
-                * Catch too large forward jumps too:
-                */
-               u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
-               u64 max_time = rq->tick_timestamp + max_jump;
-
-               if (unlikely(clock + delta > max_time)) {
-                       if (clock < max_time)
-                               clock = max_time;
-                       else
-                               clock++;
-                       rq->clock_overflows++;
-               } else {
-                       if (unlikely(delta > rq->clock_max_delta))
-                               rq->clock_max_delta = delta;
-                       clock += delta;
-               }
-       }
-
-       rq->prev_clock_raw = now;
-       rq->clock = clock;
-}
-
-static void update_rq_clock(struct rq *rq)
-{
-       if (likely(smp_processor_id() == cpu_of(rq)))
-               __update_rq_clock(rq);
-}
-
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
@@ -713,6 +628,11 @@ static void update_rq_clock(struct rq *rq)
 #define task_rq(p)             cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
 
+static inline void update_rq_clock(struct rq *rq)
+{
+       rq->clock = sched_clock_cpu(cpu_of(rq));
+}
+
 /*
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
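
With the old per-rq clock bookkeeping gone, rq->clock is now simply refreshed from the per-CPU clock infrastructure via sched_clock_cpu(). A minimal sketch of the resulting caller pattern (the read_rq_clock() helper is hypothetical and not part of this patch):

	/*
	 * Sketch only: take the runqueue lock, refresh rq->clock from
	 * sched_clock_cpu(), then use the cached value while the lock
	 * is still held.
	 */
	static u64 read_rq_clock(struct rq *rq)
	{
		u64 now;

		spin_lock(&rq->lock);
		update_rq_clock(rq);	/* rq->clock = sched_clock_cpu(cpu_of(rq)) */
		now = rq->clock;
		spin_unlock(&rq->lock);

		return now;
	}
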
@@ -722,6 +642,24 @@ static void update_rq_clock(struct rq *rq)
 # define const_debug static const
 #endif
 
+/**
+ * runqueue_is_locked
+ *
+ * Returns true if the current cpu runqueue is locked.
+ * This interface allows printk to be called with the runqueue lock
+ * held and know whether or not it is OK to wake up the klogd.
+ */
+int runqueue_is_locked(void)
+{
+       int cpu = get_cpu();
+       struct rq *rq = cpu_rq(cpu);
+       int ret;
+
+       ret = spin_is_locked(&rq->lock);
+       put_cpu();
+       return ret;
+}
+
 /*
  * Debugging: various feature bits
  */
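
runqueue_is_locked() is exported so that printk can be called with the runqueue lock held and still decide whether waking klogd is safe. A hedged usage sketch; the function name below is hypothetical and the actual printk-side call site lives outside this file:

	static void console_trigger(void)
	{
		/*
		 * Sketch only: defer waking klogd while this CPU's
		 * runqueue lock is held, to avoid recursing into the
		 * scheduler from the wakeup path.
		 */
		if (!runqueue_is_locked())
			wake_up_klogd();
	}
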
@@ -904,11 +842,14 @@ static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
 static DEFINE_SPINLOCK(time_sync_lock);
 static unsigned long long prev_global_time;
 
-static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
+static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&time_sync_lock, flags);
+       /*
+        * We want this inlined, to not get tracer function calls
+        * in this critical section:
+        */
+       spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_);
+       __raw_spin_lock(&time_sync_lock.raw_lock);
 
        if (time < prev_global_time) {
                per_cpu(time_offset, cpu) += prev_global_time - time;
@@ -917,7 +858,8 @@ static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
                prev_global_time = time;
        }
 
-       spin_unlock_irqrestore(&time_sync_lock, flags);
+       __raw_spin_unlock(&time_sync_lock.raw_lock);
+       spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_);
 
        return time;
 }
@@ -925,8 +867,6 @@ static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
 static unsigned long long __cpu_clock(int cpu)
 {
        unsigned long long now;
-       unsigned long flags;
-       struct rq *rq;
 
        /*
         * Only call sched_clock() if the scheduler has already been
@@ -935,11 +875,7 @@ static unsigned long long __cpu_clock(int cpu)
        if (unlikely(!scheduler_running))
                return 0;
 
-       local_irq_save(flags);
-       rq = cpu_rq(cpu);
-       update_rq_clock(rq);
-       now = rq->clock;
-       local_irq_restore(flags);
+       now = sched_clock_cpu(cpu);
 
        return now;
 }
@@ -951,13 +887,18 @@ static unsigned long long __cpu_clock(int cpu)
 unsigned long long cpu_clock(int cpu)
 {
        unsigned long long prev_cpu_time, time, delta_time;
+       unsigned long flags;
 
+       local_irq_save(flags);
        prev_cpu_time = per_cpu(prev_cpu_time, cpu);
        time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
        delta_time = time-prev_cpu_time;
 
-       if (unlikely(delta_time > time_sync_thresh))
+       if (unlikely(delta_time > time_sync_thresh)) {
                time = __sync_cpu_clock(time, cpu);
+               per_cpu(prev_cpu_time, cpu) = time;
+       }
+       local_irq_restore(flags);
 
        return time;
 }
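
cpu_clock() now disables interrupts around the whole read and remembers the synced value in prev_cpu_time, so __sync_cpu_clock() is not re-entered until the clock has drifted by another time_sync_thresh. A standalone sketch of the offset arithmetic with hypothetical numbers (plain userspace C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		/* Sketch only: mirrors the __sync_cpu_clock() test. */
		unsigned long long prev_global = 5000000; /* last value seen globally  */
		unsigned long long offset = 0;            /* per_cpu(time_offset, 1)   */
		unsigned long long raw = 3000000;         /* CPU 1's clock lags behind */
		unsigned long long time = raw + offset;

		if (time < prev_global) {
			/* lagging CPU: grow its offset so it catches up */
			offset += prev_global - time;
			time = prev_global;
		} else {
			/* leading CPU: advance the global high-water mark */
			prev_global = time;
		}

		printf("cpu_clock(1) = %llu, time_offset = %llu\n", time, offset);
		return 0;
	}
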
@@ -1108,45 +1049,6 @@ static struct rq *this_rq_lock(void)
        return rq;
 }
 
-/*
- * We are going deep-idle (irqs are disabled):
- */
-void sched_clock_idle_sleep_event(void)
-{
-       struct rq *rq = cpu_rq(smp_processor_id());
-
-       WARN_ON(!irqs_disabled());
-       spin_lock(&rq->lock);
-       __update_rq_clock(rq);
-       spin_unlock(&rq->lock);
-       rq->clock_deep_idle_events++;
-}
-EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
-
-/*
- * We just idled delta nanoseconds (called with irqs disabled):
- */
-void sched_clock_idle_wakeup_event(u64 delta_ns)
-{
-       struct rq *rq = cpu_rq(smp_processor_id());
-       u64 now = sched_clock();
-
-       WARN_ON(!irqs_disabled());
-       rq->idle_clock += delta_ns;
-       /*
-        * Override the previous timestamp and ignore all
-        * sched_clock() deltas that occured while we idled,
-        * and use the PM-provided delta_ns to advance the
-        * rq clock:
-        */
-       spin_lock(&rq->lock);
-       rq->prev_clock_raw = now;
-       rq->clock += delta_ns;
-       spin_unlock(&rq->lock);
-       touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
-
 static void __resched_task(struct task_struct *p, int tif_bit);
 
 static inline void resched_task(struct task_struct *p)
@@ -1271,7 +1173,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
        spin_lock(&rq->lock);
-       __update_rq_clock(rq);
+       update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
        spin_unlock(&rq->lock);
 
@@ -1804,6 +1706,8 @@ __update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd,
 
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
+       else if (shares > MAX_SHARES)
+               shares = MAX_SHARES;
 
        __set_se_shares(tg->se[tcpu], shares);
 }
@@ -2508,6 +2412,46 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_CONTEXT_SWITCH_TRACER
+
+void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
+{
+       struct task_struct *p;
+       struct sched_entity *se;
+       struct rb_node *curr;
+       struct rq *rq = __rq;
+
+       curr = first_fair(&rq->cfs);
+       if (!curr)
+               return;
+
+       if (rq->cfs.curr) {
+               p = task_of(rq->cfs.curr);
+               __trace_special(__tr, __data,
+                     p->pid, p->se.vruntime, p->se.sum_exec_runtime);
+       }
+       if (rq->cfs.next) {
+               p = task_of(rq->cfs.next);
+               __trace_special(__tr, __data,
+                     p->pid, p->se.vruntime, p->se.sum_exec_runtime);
+       }
+
+       while (curr) {
+               se = rb_entry(curr, struct sched_entity, run_node);
+               /* advance before skipping group entities, or we would loop forever */
+               curr = rb_next(curr);
+               if (!entity_is_task(se))
+                       continue;
+
+               p = task_of(se);
+
+               __trace_special(__tr, __data,
+                             p->pid, p->se.vruntime, p->se.sum_exec_runtime);
+       }
+}
+
+#endif
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -2582,6 +2526,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 out_activate:
 #endif /* CONFIG_SMP */
+       ftrace_wake_up_task(rq, p, rq->curr);
        schedstat_inc(p, se.nr_wakeups);
        if (sync)
                schedstat_inc(p, se.nr_wakeups_sync);
@@ -2726,6 +2671,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                p->sched_class->task_new(rq, p);
                inc_nr_running(rq);
        }
+       ftrace_wake_up_task(rq, p, rq->curr);
        check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_wake_up)
@@ -2898,6 +2844,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm, *oldmm;
 
        prepare_task_switch(rq, prev, next);
+       ftrace_ctx_switch(rq, prev, next);
        mm = next->mm;
        oldmm = prev->active_mm;
        /*
@@ -4462,19 +4409,11 @@ void scheduler_tick(void)
        int cpu = smp_processor_id();
        struct rq *rq = cpu_rq(cpu);
        struct task_struct *curr = rq->curr;
-       u64 next_tick = rq->tick_timestamp + TICK_NSEC;
+
+       sched_clock_tick();
 
        spin_lock(&rq->lock);
-       __update_rq_clock(rq);
-       /*
-        * Let rq->clock advance by at least TICK_NSEC:
-        */
-       if (unlikely(rq->clock < next_tick)) {
-               rq->clock = next_tick;
-               rq->clock_underflows++;
-       }
-       rq->tick_timestamp = rq->clock;
-       update_last_tick_seen(rq);
+       update_rq_clock(rq);
        update_cpu_load(rq);
        curr->sched_class->task_tick(rq, curr, 0);
        spin_unlock(&rq->lock);
@@ -4485,26 +4424,44 @@ void scheduler_tick(void)
 #endif
 }
 
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+                               defined(CONFIG_PREEMPT_TRACER))
+
+static inline unsigned long get_parent_ip(unsigned long addr)
+{
+       if (in_lock_functions(addr)) {
+               addr = CALLER_ADDR2;
+               if (in_lock_functions(addr))
+                       addr = CALLER_ADDR3;
+       }
+       return addr;
+}
 
 void __kprobes add_preempt_count(int val)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Underflow?
         */
        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
                return;
+#endif
        preempt_count() += val;
+#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Spinlock count overflowing soon?
         */
        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
                                PREEMPT_MASK - 10);
+#endif
+       if (preempt_count() == val)
+               trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 EXPORT_SYMBOL(add_preempt_count);
 
 void __kprobes sub_preempt_count(int val)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Underflow?
         */
@@ -4516,7 +4473,10 @@ void __kprobes sub_preempt_count(int val)
        if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
                        !(preempt_count() & PREEMPT_MASK)))
                return;
+#endif
 
+       if (preempt_count() == val)
+               trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
        preempt_count() -= val;
 }
 EXPORT_SYMBOL(sub_preempt_count);
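
The trace_preempt_off()/trace_preempt_on() hooks fire only on the transition between a zero and a non-zero preempt count, so nested sections are measured as a single latency, and get_parent_ip() skips over lock wrappers so the trace points at the real caller rather than at the locking primitive itself. A sketch of which transitions the tracer sees (hypothetical call site, using the generic preempt_disable()/preempt_enable() wrappers):

	/* Sketch only: only the outermost disable/enable pair is traced. */
	static void traced_section(void)
	{
		preempt_disable();	/* count 0 -> 1: trace_preempt_off()  */
		preempt_disable();	/* count 1 -> 2: nested, no event     */
		/* ... preemption-off critical section being measured ... */
		preempt_enable();	/* count 2 -> 1: no event             */
		preempt_enable();	/* count 1 -> 0: trace_preempt_on()   */
	}
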
@@ -4628,7 +4588,7 @@ need_resched_nonpreemptible:
         * Do the rq-clock update outside the rq lock:
         */
        local_irq_disable();
-       __update_rq_clock(rq);
+       update_rq_clock(rq);
        spin_lock(&rq->lock);
        clear_tsk_need_resched(prev);
 
@@ -4690,8 +4650,6 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
        struct thread_info *ti = current_thread_info();
-       struct task_struct *task = current;
-       int saved_lock_depth;
 
        /*
         * If there is a non-zero preempt_count or interrupts are disabled,
@@ -4702,16 +4660,7 @@ asmlinkage void __sched preempt_schedule(void)
 
        do {
                add_preempt_count(PREEMPT_ACTIVE);
-
-               /*
-                * We keep the big kernel semaphore locked, but we
-                * clear ->lock_depth so that schedule() doesnt
-                * auto-release the semaphore:
-                */
-               saved_lock_depth = task->lock_depth;
-               task->lock_depth = -1;
                schedule();
-               task->lock_depth = saved_lock_depth;
                sub_preempt_count(PREEMPT_ACTIVE);
 
                /*
@@ -4732,26 +4681,15 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
        struct thread_info *ti = current_thread_info();
-       struct task_struct *task = current;
-       int saved_lock_depth;
 
        /* Catch callers which need to be fixed */
        BUG_ON(ti->preempt_count || !irqs_disabled());
 
        do {
                add_preempt_count(PREEMPT_ACTIVE);
-
-               /*
-                * We keep the big kernel semaphore locked, but we
-                * clear ->lock_depth so that schedule() doesnt
-                * auto-release the semaphore:
-                */
-               saved_lock_depth = task->lock_depth;
-               task->lock_depth = -1;
                local_irq_enable();
                schedule();
                local_irq_disable();
-               task->lock_depth = saved_lock_depth;
                sub_preempt_count(PREEMPT_ACTIVE);
 
                /*
@@ -5670,7 +5608,6 @@ static void __cond_resched(void)
        } while (need_resched());
 }
 
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
 int __sched _cond_resched(void)
 {
        if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
@@ -5681,7 +5618,6 @@ int __sched _cond_resched(void)
        return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
-#endif
 
 /*
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,
@@ -5873,7 +5809,7 @@ out_unlock:
        return retval;
 }
 
-static const char stat_nam[] = "RSDTtZX";
+static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
 
 void sched_show_task(struct task_struct *p)
 {
@@ -5976,8 +5912,11 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
+#if defined(CONFIG_PREEMPT)
+       task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
+#else
        task_thread_info(idle)->preempt_count = 0;
-
+#endif
        /*
         * The idle tasks have their own, simple scheduling class:
         */
@@ -8212,8 +8151,6 @@ void __init sched_init(void)
                spin_lock_init(&rq->lock);
                lockdep_set_class(&rq->lock, &rq->rq_lock_key);
                rq->nr_running = 0;
-               rq->clock = 1;
-               update_last_tick_seen(rq);
                init_cfs_rq(&rq->cfs, rq);
                init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -8357,6 +8294,7 @@ EXPORT_SYMBOL(__might_sleep);
 static void normalize_task(struct rq *rq, struct task_struct *p)
 {
        int on_rq;
+
        update_rq_clock(rq);
        on_rq = p->se.on_rq;
        if (on_rq)
@@ -8388,7 +8326,6 @@ void normalize_rt_tasks(void)
                p->se.sleep_start               = 0;
                p->se.block_start               = 0;
 #endif
-               task_rq(p)->clock               = 0;
 
                if (!rt_task(p)) {
                        /*
@@ -8785,13 +8722,10 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
        if (!tg->se[0])
                return -EINVAL;
 
-       /*
-        * A weight of 0 or 1 can cause arithmetics problems.
-        * (The default weight is 1024 - so there's no practical
-        *  limitation from this.)
-        */
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
+       else if (shares > MAX_SHARES)
+               shares = MAX_SHARES;
 
        mutex_lock(&shares_mutex);
        if (tg->shares == shares)
@@ -8816,7 +8750,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
                 * force a rebalance
                 */
                cfs_rq_set_shares(tg->cfs_rq[i], 0);
-               set_se_shares(tg->se[i], shares/nr_cpu_ids);
+               set_se_shares(tg->se[i], shares);
        }
 
        /*
@@ -9135,7 +9069,7 @@ static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
+static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
                                s64 val)
 {
        return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);