sched/debug: Add new tracepoints to track util_est
author     Vincent Donnefort <vincent.donnefort@arm.com>
           Wed, 27 May 2020 16:39:14 +0000 (17:39 +0100)
committer  Peter Zijlstra <peterz@infradead.org>
           Mon, 15 Jun 2020 12:10:02 +0000 (14:10 +0200)
The util_est signals are key elements for EAS task placement and
frequency selection. Having tracepoints that track these signals lets an
external toolkit test and/or debug load tracking and schedutil.

Signed-off-by: Vincent Donnefort <vincent.donnefort@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/1590597554-370150-1-git-send-email-vincent.donnefort@arm.com
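
As an illustration of the kind of consumer the changelog has in mind, below is
a minimal sketch (not part of the patch) of a GPL module that attaches a probe
to the new sched_util_est_cfs_tp bare tracepoint; the module and probe names
are made up for the example. Because the tracepoint is declared with
DECLARE_TRACE rather than TRACE_EVENT, it has no tracefs event, so it is
typically consumed by attaching a probe from a module like this.

/*
 * Hedged sketch, not part of the patch: a toy GPL module hooking the
 * new bare tracepoint. All names below are illustrative.
 */
#include <linux/module.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

static void util_est_cfs_probe(void *data, struct cfs_rq *cfs_rq)
{
	/*
	 * struct cfs_rq is scheduler-internal, so the pointer is opaque
	 * here; a real toolkit module would ship its own accessors for
	 * the fields it cares about.
	 */
	trace_printk("sched_util_est_cfs_tp: cfs_rq=%p\n", cfs_rq);
}

static int __init util_est_probe_init(void)
{
	/* Possible from a GPL module because core.c exports the tracepoint. */
	return register_trace_sched_util_est_cfs_tp(util_est_cfs_probe, NULL);
}

static void __exit util_est_probe_exit(void)
{
	unregister_trace_sched_util_est_cfs_tp(util_est_cfs_probe, NULL);
	/* Ensure no probe is still executing before the module goes away. */
	tracepoint_synchronize_unregister();
}

module_init(util_est_probe_init);
module_exit(util_est_probe_exit);
MODULE_LICENSE("GPL");

The same pattern, using register_trace_sched_util_est_se_tp(), works for the
per-entity tracepoint added below.
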
include/trace/events/sched.h
kernel/sched/core.c
kernel/sched/fair.c

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index ed168b0e2c53f08447bffc3b2dbc55a1e1eed334..04f9a4c7b0d91078a49dda02c65a771214309671 100644
@@ -634,6 +634,14 @@ DECLARE_TRACE(sched_overutilized_tp,
        TP_PROTO(struct root_domain *rd, bool overutilized),
        TP_ARGS(rd, overutilized));
 
+DECLARE_TRACE(sched_util_est_cfs_tp,
+       TP_PROTO(struct cfs_rq *cfs_rq),
+       TP_ARGS(cfs_rq));
+
+DECLARE_TRACE(sched_util_est_se_tp,
+       TP_PROTO(struct sched_entity *se),
+       TP_ARGS(se));
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9c89b0eaf796176c44cbd918454ce701a04279c9..0208b71bef80241a32abf32a69c466f7bb1debc8 100644
@@ -36,6 +36,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 69da576f7f487915e18f51983541a207a15fa0c5..a785a9b262ddf00a9609380e148f2c42384c176e 100644
@@ -3922,6 +3922,8 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
        enqueued  = cfs_rq->avg.util_est.enqueued;
        enqueued += _task_util_est(p);
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+       trace_sched_util_est_cfs_tp(cfs_rq);
 }
 
 /*
@@ -3952,6 +3954,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
        ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
+       trace_sched_util_est_cfs_tp(cfs_rq);
+
        /*
         * Skip update of task's estimated utilization when the task has not
         * yet completed an activation, e.g. being migrated.
@@ -4017,6 +4021,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
        ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
 done:
        WRITE_ONCE(p->se.avg.util_est, ue);
+
+       trace_sched_util_est_se_tp(&p->se);
 }
 
 static inline int task_fits_capacity(struct task_struct *p, long capacity)
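
For the per-entity tracepoint, a probe can go further than printing a pointer:
unlike struct cfs_rq, struct sched_entity is defined in the public
<linux/sched.h>, and the call site above fires right after
WRITE_ONCE(p->se.avg.util_est, ue), so the freshly updated estimate is
directly readable. A hedged sketch of such a probe follows (names are
illustrative; registration and teardown mirror the module sketch above, using
register_trace_sched_util_est_se_tp()):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

/* Probe signature as generated by DECLARE_TRACE(sched_util_est_se_tp, ...).
 * Assumes CONFIG_SMP, where se->avg exists. */
static void util_est_se_probe(void *data, struct sched_entity *se)
{
	/* Both fields were written just before the tracepoint fires in
	 * util_est_dequeue() above. */
	unsigned int enqueued = READ_ONCE(se->avg.util_est.enqueued);
	unsigned int ewma     = READ_ONCE(se->avg.util_est.ewma);

	trace_printk("sched_util_est_se_tp: enqueued=%u ewma=%u\n",
		     enqueued, ewma);
}
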