git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
sched/topology,schedutil: Wrap sched domains rebuild
Author:     Ionela Voinescu <ionela.voinescu@arm.com>
AuthorDate: Tue, 27 Oct 2020 18:07:11 +0000 (18:07 +0000)
Committer:  Peter Zijlstra <peterz@infradead.org>
CommitDate: Thu, 19 Nov 2020 10:25:47 +0000 (11:25 +0100)
Add the rebuild_sched_domains_energy() function to wrap the functionality
that rebuilds the scheduling domains if any of the Energy Aware Scheduling
(EAS) initialisation conditions change. This functionality is used when
schedutil is added or removed or when EAS is enabled or disabled
through the sched_energy_aware sysctl.

Therefore, create a single function that is used in both these cases and
that can be later reused.

Signed-off-by: Ionela Voinescu <ionela.voinescu@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Quentin Perret <qperret@google.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lkml.kernel.org/r/20201027180713.7642-2-ionela.voinescu@arm.com
include/linux/sched/topology.h
kernel/sched/cpufreq_schedutil.c
kernel/sched/topology.c

index 9ef7bf686a9f7937c66d75d445c1907b307b7e73..8f0f778b7c9111bb51c5961f1042b6a1755e6f91 100644 (file)
@@ -225,6 +225,14 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
 
 #endif /* !CONFIG_SMP */
 
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+extern void rebuild_sched_domains_energy(void);
+#else
+static inline void rebuild_sched_domains_energy(void)
+{
+}
+#endif
+
 #ifndef arch_scale_cpu_capacity
 /**
  * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
index e254745a82cb851e0f695edfc628fbbf519b8e40..37b3038903361fb9187a897c649ac97e82f84183 100644 (file)
@@ -899,16 +899,9 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 cpufreq_governor_init(schedutil_gov);
 
 #ifdef CONFIG_ENERGY_MODEL
-extern bool sched_energy_update;
-extern struct mutex sched_energy_mutex;
-
 static void rebuild_sd_workfn(struct work_struct *work)
 {
-       mutex_lock(&sched_energy_mutex);
-       sched_energy_update = true;
-       rebuild_sched_domains();
-       sched_energy_update = false;
-       mutex_unlock(&sched_energy_mutex);
+       rebuild_sched_domains_energy();
 }
 static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
 
index b296c1c6b961caad0a744f57dba041da3d18a63d..04d9ebfaedd61ed352cd302af8be2d396a0a4f91 100644 (file)
@@ -211,6 +211,15 @@ unsigned int sysctl_sched_energy_aware = 1;
 DEFINE_MUTEX(sched_energy_mutex);
 bool sched_energy_update;
 
+void rebuild_sched_domains_energy(void)
+{
+       mutex_lock(&sched_energy_mutex);
+       sched_energy_update = true;
+       rebuild_sched_domains();
+       sched_energy_update = false;
+       mutex_unlock(&sched_energy_mutex);
+}
+
 #ifdef CONFIG_PROC_SYSCTL
 int sched_energy_aware_handler(struct ctl_table *table, int write,
                void *buffer, size_t *lenp, loff_t *ppos)
@@ -223,13 +232,8 @@ int sched_energy_aware_handler(struct ctl_table *table, int write,
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (!ret && write) {
                state = static_branch_unlikely(&sched_energy_present);
-               if (state != sysctl_sched_energy_aware) {
-                       mutex_lock(&sched_energy_mutex);
-                       sched_energy_update = 1;
-                       rebuild_sched_domains();
-                       sched_energy_update = 0;
-                       mutex_unlock(&sched_energy_mutex);
-               }
+               if (state != sysctl_sched_energy_aware)
+                       rebuild_sched_domains_energy();
        }
 
        return ret;