git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge branches 'pm-sleep', 'pm-domains', 'powercap' and 'pm-tools'
author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Mon, 3 Aug 2020 11:12:44 +0000 (13:12 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Mon, 3 Aug 2020 11:12:44 +0000 (13:12 +0200)
* pm-sleep:
  PM: sleep: spread "const char *" correctness
  PM: hibernate: fix white space in a few places
  freezer: Add unsafe version of freezable_schedule_timeout_interruptible() for NFS
  PM: sleep: core: Emit changed uevent on wakeup_sysfs_add/remove

* pm-domains:
  PM: domains: Restore comment indentation for generic_pm_domain.child_links
  PM: domains: Fix up terminology with parent/child

* powercap:
  powercap: Add Power Limit4 support
  powercap: idle_inject: Replace play_idle() with play_idle_precise() in comments
  powercap: intel_rapl: add support for Sapphire Rapids

* pm-tools:
  pm-graph v5.7 - important s2idle fixes
  cpupower: Replace HTTP links with HTTPS ones
  cpupower: Fix NULL but dereferenced coccicheck errors
  cpupower: Fix comparing pointer to 0 coccicheck warns

76 files changed:
Documentation/ABI/testing/sysfs-class-devfreq
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/pm/cpufreq.rst
Documentation/admin-guide/pm/intel_pstate.rst
Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
Documentation/power/energy-model.rst
Documentation/power/powercap/powercap.rst
MAINTAINERS
arch/powerpc/platforms/cell/cpufreq_spudemand.c
arch/x86/include/asm/msr-index.h
drivers/acpi/processor_idle.c
drivers/base/power/domain.c
drivers/base/power/domain_governor.c
drivers/base/power/sysfs.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/amd_freq_sensitivity.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_performance.c
drivers/cpufreq/cpufreq_powersave.c
drivers/cpufreq/cpufreq_userspace.c
drivers/cpufreq/davinci-cpufreq.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/mediatek-cpufreq.c
drivers/cpufreq/omap-cpufreq.c
drivers/cpufreq/pasemi-cpufreq.c
drivers/cpufreq/pcc-cpufreq.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/qcom-cpufreq-hw.c
drivers/cpufreq/scmi-cpufreq.c
drivers/cpufreq/scpi-cpufreq.c
drivers/cpufreq/vexpress-spc-cpufreq.c
drivers/cpuidle/Kconfig.arm
drivers/cpuidle/Makefile
drivers/cpuidle/cpuidle-psci-domain.c
drivers/cpuidle/cpuidle-psci.c
drivers/cpuidle/cpuidle-psci.h
drivers/cpuidle/cpuidle-tegra.c
drivers/devfreq/devfreq-event.c
drivers/devfreq/devfreq.c
drivers/devfreq/rk3399_dmc.c
drivers/idle/intel_idle.c
drivers/memory/samsung/exynos5422-dmc.c
drivers/mmc/host/jz4740_mmc.c
drivers/opp/core.c
drivers/opp/of.c
drivers/opp/ti-opp-supply.c
drivers/powercap/idle_inject.c
drivers/powercap/intel_rapl_common.c
drivers/powercap/intel_rapl_msr.c
drivers/thermal/cpufreq_cooling.c
fs/nfs/nfs4proc.c
include/linux/cpufreq.h
include/linux/cpuidle.h
include/linux/devfreq.h
include/linux/device.h
include/linux/energy_model.h
include/linux/freezer.h
include/linux/intel_rapl.h
include/linux/pm.h
include/linux/pm_domain.h
include/linux/pm_opp.h
kernel/power/energy_model.c
kernel/power/hibernate.c
kernel/power/power.h
kernel/power/snapshot.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/sched/topology.c
scripts/gdb/linux/genpd.py

diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
index 9758eb85ade3ea461787f2707c14eb400df5a660..deefffb3bbe45dba1c855e3525c739363afed0f6 100644
@@ -108,3 +108,15 @@ Description:
                frequency requested by governors and min_freq.
                The max_freq overrides min_freq because max_freq may be
                used to throttle devices to avoid overheating.
+
+What:          /sys/class/devfreq/.../timer
+Date:          July 2020
+Contact:       Chanwoo Choi <cw00.choi@samsung.com>
+Description:
+               This ABI shows and stores the kind of work timer used by
+               the devfreq workqueue. The work timer is used to monitor
+               the status of the device, such as its utilization. The
+               user can change the work timer at runtime according to
+               their demand, as follows:
+                       echo deferrable > /sys/class/devfreq/.../timer
+                       echo delayed > /sys/class/devfreq/.../timer
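
As an illustration, switching the timer type is an ordinary sysfs write. A
minimal userspace sketch in C, assuming a hypothetical device node named
"devfreq0" (pick a real one from /sys/class/devfreq)::

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative path; the device name depends on the platform. */
        const char *path = "/sys/class/devfreq/devfreq0/timer";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        fputs("delayed\n", f);  /* or "deferrable" */
        return fclose(f) ? 1 : 0;
    }
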
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fb95fad81c79a065e728202a34a3dabac5653cb4..8deb5a89328ac6447e5de8c3cb733140422061c6 100644
        cpufreq.off=1   [CPU_FREQ]
                        disable the cpufreq sub-system
 
+       cpufreq.default_governor=
+                       [CPU_FREQ] Name of the default cpufreq governor or
+                       policy to use. This governor must be registered in the
+                       kernel before the cpufreq driver probes.
+
        cpu_init_udelay=N
                        [X86] Delay for N microsec between assert and de-assert
                        of APIC INIT to start processors.  This delay occurs
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 0c74a778496480ac9899b7bce415798cc274c362..368e612145d2e84f12d10d37a7859cab65599967 100644
@@ -147,9 +147,9 @@ CPUs in it.
 
 The next major initialization step for a new policy object is to attach a
 scaling governor to it (to begin with, that is the default scaling governor
-determined by the kernel configuration, but it may be changed later
-via ``sysfs``).  First, a pointer to the new policy object is passed to the
-governor's ``->init()`` callback which is expected to initialize all of the
+determined by the kernel command line or configuration, but it may be changed
+later via ``sysfs``).  First, a pointer to the new policy object is passed to
+the governor's ``->init()`` callback which is expected to initialize all of the
 data structures necessary to handle the given policy and, possibly, to add
 a governor ``sysfs`` interface to it.  Next, the governor is started by
 invoking its ``->start()`` callback.
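
For orientation, the ``->init()`` and ``->start()`` callbacks mentioned here
both take the policy pointer. A hedged sketch of a governor skeleton (names
are illustrative; assumes <linux/cpufreq.h> and <linux/module.h>)::

    static int foo_gov_init(struct cpufreq_policy *policy)
    {
        /* Allocate per-policy data, optionally add governor sysfs files. */
        return 0;
    }

    static int foo_gov_start(struct cpufreq_policy *policy)
    {
        /* Begin driving frequency updates for this policy. */
        return 0;
    }

    static struct cpufreq_governor foo_governor = {
        .name  = "foo",
        .init  = foo_gov_init,
        .start = foo_gov_start,
        .owner = THIS_MODULE,
    };
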
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index 39d80bc29ccd6e11c13ca084d34343b6238456ca..40d481cca36848cfb37248bb765fd146e73f93c2 100644
@@ -431,6 +431,17 @@ argument is passed to the kernel in the command line.
        supported in the current configuration, writes to this attribute will
        fail with an appropriate error.
 
+``energy_efficiency``
+       This attribute is only present on platforms with CPUs matching the
+       Kaby Lake or Coffee Lake desktop CPU models. By default, energy
+       efficiency optimizations are disabled on these CPU models in HWP
+       mode by this driver. Enabling energy efficiency may limit the
+       maximum operating frequency in both HWP and non-HWP modes. In
+       non-HWP mode, optimizations are done only in the turbo frequency
+       range. In HWP mode, optimizations are done in the entire frequency
+       range. Setting this attribute to "1" enables the energy efficiency
+       optimizations and setting it to "0" disables them.
+
 Interpretation of Policy Attributes
 -----------------------------------
 
@@ -554,7 +565,11 @@ somewhere between the two extremes:
 Strings written to the ``energy_performance_preference`` attribute are
 internally translated to integer values written to the processor's
 Energy-Performance Preference (EPP) knob (if supported) or its
-Energy-Performance Bias (EPB) knob.
+Energy-Performance Bias (EPB) knob. It is also possible to write an
+integer value between 0 and 255 to this attribute, if the EPP feature
+is present. If the EPP feature is not present, writing an integer value
+to this attribute is not supported. In this case, the
+"/sys/devices/system/cpu/cpu*/power/energy_perf_bias" interface can be used.
 
 [Note that tasks may by migrated from one CPU to another by the scheduler's
 load-balancing algorithm and if different energy vs performance hints are
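
By way of example, writing a raw EPP value from userspace is a plain sysfs
write; a minimal sketch with the CPU number hardcoded for brevity::

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/"
                        "energy_performance_preference", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* 0 favors performance, 255 favors energy savings. */
        fprintf(f, "%d\n", 128);
        return fclose(f) ? 1 : 0;
    }
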
diff --git a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
index 0ec68141f85a7c05aa04b10c277b3ad02af43942..a10d1f6d85c64ef5443aca1b64ec88425b0e8eff 100644
@@ -18,6 +18,8 @@ Optional properties:
                         format depends on the interrupt controller.
                         It should be a DCF interrupt. When DDR DVFS finishes
                         a DCF interrupt is triggered.
+- rockchip,pmu:                 Phandle to the syscon managing the "PMU general register
+                        files".
 
 Following properties relate to DDR timing:
 
diff --git a/Documentation/power/energy-model.rst b/Documentation/power/energy-model.rst
index 90a345d57ae9231423c0cb1b8971c78738e27f07..a6fb986abe3cab2144d5801e9bbd83d2df05c893 100644
@@ -1,15 +1,17 @@
-====================
-Energy Model of CPUs
-====================
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+Energy Model of devices
+=======================
 
 1. Overview
 -----------
 
 The Energy Model (EM) framework serves as an interface between drivers knowing
-the power consumed by CPUs at various performance levels, and the kernel
+the power consumed by devices at various performance levels, and the kernel
 subsystems willing to use that information to make energy-aware decisions.
 
-The source of the information about the power consumed by CPUs can vary greatly
+The source of the information about the power consumed by devices can vary greatly
 from one platform to another. These power costs can be estimated using
 devicetree data in some cases. In others, the firmware will know better.
 Alternatively, userspace might be best positioned. And so on. In order to avoid
@@ -25,7 +27,7 @@ framework, and interested clients reading the data from it::
        +---------------+  +-----------------+  +---------------+
        | Thermal (IPA) |  | Scheduler (EAS) |  |     Other     |
        +---------------+  +-----------------+  +---------------+
-               |                   | em_pd_energy()    |
+               |                   | em_cpu_energy()   |
                |                   | em_cpu_get()      |
                +---------+         |         +---------+
                          |         |         |
@@ -35,7 +37,7 @@ framework, and interested clients reading the data from it::
                         |     Framework       |
                         +---------------------+
                            ^       ^       ^
-                           |       |       | em_register_perf_domain()
+                           |       |       | em_dev_register_perf_domain()
                 +----------+       |       +---------+
                 |                  |                 |
         +---------------+  +---------------+  +--------------+
@@ -47,12 +49,12 @@ framework, and interested clients reading the data from it::
         | Device Tree  |   |   Firmware    |  |      ?       |
         +--------------+   +---------------+  +--------------+
 
-The EM framework manages power cost tables per 'performance domain' in the
-system. A performance domain is a group of CPUs whose performance is scaled
-together. Performance domains generally have a 1-to-1 mapping with CPUFreq
-policies. All CPUs in a performance domain are required to have the same
-micro-architecture. CPUs in different performance domains can have different
-micro-architectures.
+In case of CPU devices the EM framework manages power cost tables per
+'performance domain' in the system. A performance domain is a group of CPUs
+whose performance is scaled together. Performance domains generally have a
+1-to-1 mapping with CPUFreq policies. All CPUs in a performance domain are
+required to have the same micro-architecture. CPUs in different performance
+domains can have different micro-architectures.
 
 
 2. Core APIs
@@ -70,14 +72,16 @@ CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
 Drivers are expected to register performance domains into the EM framework by
 calling the following API::
 
-  int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
-                             struct em_data_callback *cb);
+  int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+               struct em_data_callback *cb, cpumask_t *cpus);
 
-Drivers must specify the CPUs of the performance domains using the cpumask
-argument, and provide a callback function returning <frequency, power> tuples
-for each capacity state. The callback function provided by the driver is free
+Drivers must provide a callback function returning <frequency, power> tuples
+for each performance state. The callback function provided by the driver is free
 to fetch data from any relevant location (DT, firmware, ...), and by any mean
-deemed necessary. See Section 3. for an example of driver implementing this
+deemed necessary. Only for CPU devices, drivers must specify the CPUs of
+the performance domains using the cpumask argument. For devices other
+than CPUs, the last argument must be set to NULL.
+See Section 3. for an example of a driver implementing this
 callback, and kernel/power/energy_model.c for further documentation on this
 API.
 
@@ -85,13 +89,20 @@ API.
 2.3 Accessing performance domains
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
+There are two API functions which provide access to the energy model:
+em_cpu_get(), which takes a CPU id as an argument, and em_pd_get(), which
+takes a device pointer as an argument. Which interface to use is up to
+the calling subsystem, but in the case of CPU devices both functions
+return the same performance domain.
+
 Subsystems interested in the energy model of a CPU can retrieve it using the
 em_cpu_get() API. The energy model tables are allocated once upon creation of
 the performance domains, and kept in memory untouched.
 
 The energy consumed by a performance domain can be estimated using the
-em_pd_energy() API. The estimation is performed assuming that the schedutil
-CPUfreq governor is in use.
+em_cpu_energy() API. The estimation is performed assuming that the schedutil
+CPUfreq governor is in use in the case of CPU devices. Currently this
+calculation is not provided for other types of devices.
 
 More details about the above APIs can be found in include/linux/energy_model.h.
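
A hedged sketch of a client using these accessors (the function names are
real, the helper and its utilization inputs are illustrative)::

    #include <linux/energy_model.h>

    static unsigned long foo_estimate_energy(int cpu, unsigned long max_util,
                                             unsigned long sum_util)
    {
        struct em_perf_domain *pd = em_cpu_get(cpu);

        if (!pd)
            return 0;

        /* Estimate assumes schedutil-like frequency selection. */
        return em_cpu_energy(pd, max_util, sum_util);
    }
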
 
@@ -106,42 +117,46 @@ EM framework::
 
   -> drivers/cpufreq/foo_cpufreq.c
 
-  01   static int est_power(unsigned long *mW, unsigned long *KHz, int cpu)
-  02   {
-  03           long freq, power;
-  04
-  05           /* Use the 'foo' protocol to ceil the frequency */
-  06           freq = foo_get_freq_ceil(cpu, *KHz);
-  07           if (freq < 0);
-  08                   return freq;
-  09
-  10           /* Estimate the power cost for the CPU at the relevant freq. */
-  11           power = foo_estimate_power(cpu, freq);
-  12           if (power < 0);
-  13                   return power;
-  14
-  15           /* Return the values to the EM framework */
-  16           *mW = power;
-  17           *KHz = freq;
-  18
-  19           return 0;
-  20   }
-  21
-  22   static int foo_cpufreq_init(struct cpufreq_policy *policy)
-  23   {
-  24           struct em_data_callback em_cb = EM_DATA_CB(est_power);
-  25           int nr_opp, ret;
-  26
-  27           /* Do the actual CPUFreq init work ... */
-  28           ret = do_foo_cpufreq_init(policy);
-  29           if (ret)
-  30                   return ret;
-  31
-  32           /* Find the number of OPPs for this policy */
-  33           nr_opp = foo_get_nr_opp(policy);
-  34
-  35           /* And register the new performance domain */
-  36           em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
-  37
-  38           return 0;
-  39   }
+  01   static int est_power(unsigned long *mW, unsigned long *KHz,
+  02                   struct device *dev)
+  03   {
+  04           long freq, power;
+  05
+  06           /* Use the 'foo' protocol to ceil the frequency */
+  07           freq = foo_get_freq_ceil(dev, *KHz);
+  08           if (freq < 0)
+  09                   return freq;
+  10
+  11           /* Estimate the power cost for the dev at the relevant freq. */
+  12           power = foo_estimate_power(dev, freq);
+  13           if (power < 0)
+  14                   return power;
+  15
+  16           /* Return the values to the EM framework */
+  17           *mW = power;
+  18           *KHz = freq;
+  19
+  20           return 0;
+  21   }
+  22
+  23   static int foo_cpufreq_init(struct cpufreq_policy *policy)
+  24   {
+  25           struct em_data_callback em_cb = EM_DATA_CB(est_power);
+  26           struct device *cpu_dev;
+  27           int nr_opp, ret;
+  28
+  29           cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
+  30
+  31           /* Do the actual CPUFreq init work ... */
+  32           ret = do_foo_cpufreq_init(policy);
+  33           if (ret)
+  34                   return ret;
+  35
+  36           /* Find the number of OPPs for this policy */
+  37           nr_opp = foo_get_nr_opp(policy);
+  38
+  39           /* And register the new performance domain */
+  40           em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus);
+  41
+  42           return 0;
+  43   }
diff --git a/Documentation/power/powercap/powercap.rst b/Documentation/power/powercap/powercap.rst
index 7ae3b44c7624b94ec925f6f309ec9f32a3226b17..e75d12596dac2f2739d9507406207285428930c9 100644
@@ -167,11 +167,13 @@ For example::
 package-0
 ---------
 
-The Intel RAPL technology allows two constraints, short term and long term,
-with two different time windows to be applied to each power zone.  Thus for
-each zone there are 2 attributes representing the constraint names, 2 power
-limits and 2 attributes representing the sizes of the time windows. Such that,
-constraint_j_* attributes correspond to the jth constraint (j = 0,1).
+Depending on the power zone, the Intel RAPL technology allows one or
+more constraints, such as short term, long term and peak power, with
+different time windows to be applied to each power zone.
+All the zones contain attributes representing the constraint names,
+power limits and the sizes of the time windows. Note that the time
+window is not applicable to peak power. Here, the constraint_j_*
+attributes correspond to the jth constraint (j = 0,1,2).
 
 For example::
 
@@ -181,6 +183,9 @@ For example::
        constraint_1_name
        constraint_1_power_limit_uw
        constraint_1_time_window_us
+       constraint_2_name
+       constraint_2_power_limit_uw
+       constraint_2_time_window_us
 
 Power Zone Attributes
 =====================
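
The new constraint_2_* files above follow the usual powercap access pattern;
a small userspace sketch reading the peak-power constraint name (the zone
path is illustrative and may not exist on a given system)::

    #include <stdio.h>

    int main(void)
    {
        char name[64];
        FILE *f = fopen("/sys/class/powercap/intel-rapl:0/constraint_2_name",
                        "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(name, sizeof(name), f))
            printf("constraint 2: %s", name);
        return fclose(f) ? 1 : 0;
    }
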
diff --git a/MAINTAINERS b/MAINTAINERS
index f0569cf304cacb8792e56d79bbb195c0abbfcc6f..85d7fb9f04c8b83f10a18cbbdba3487d0f4b9beb 100644
@@ -11097,6 +11097,15 @@ F:     Documentation/core-api/boot-time-mm.rst
 F:     include/linux/memblock.h
 F:     mm/memblock.c
 
+MEMORY FREQUENCY SCALING DRIVERS FOR NVIDIA TEGRA
+M:     Dmitry Osipenko <digetx@gmail.com>
+L:     linux-pm@vger.kernel.org
+L:     linux-tegra@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
+S:     Maintained
+F:     drivers/devfreq/tegra20-devfreq.c
+F:     drivers/devfreq/tegra30-devfreq.c
+
 MEMORY MANAGEMENT
 M:     Andrew Morton <akpm@linux-foundation.org>
 L:     linux-mm@kvack.org
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 55b31eadb3c8ec911928b6eabc7eac73f5a8b6b6..ca7849e113d7fbc485eef1a873e3241e4abaf287 100644
@@ -126,30 +126,8 @@ static struct cpufreq_governor spu_governor = {
        .stop = spu_gov_stop,
        .owner = THIS_MODULE,
 };
-
-/*
- * module init and destoy
- */
-
-static int __init spu_gov_init(void)
-{
-       int ret;
-
-       ret = cpufreq_register_governor(&spu_governor);
-       if (ret)
-               printk(KERN_ERR "registration of governor failed\n");
-       return ret;
-}
-
-static void __exit spu_gov_exit(void)
-{
-       cpufreq_unregister_governor(&spu_governor);
-}
-
-
-module_init(spu_gov_init);
-module_exit(spu_gov_exit);
+cpufreq_governor_init(spu_governor);
+cpufreq_governor_exit(spu_governor);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
-
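
The cpufreq_governor_init()/cpufreq_governor_exit() helpers used above are
macros introduced by this series in include/linux/cpufreq.h; roughly (a
hedged paraphrase, not a verbatim copy)::

    #define cpufreq_governor_init(__governor)               \
    static int __init __governor##_init(void)               \
    {                                                       \
        return cpufreq_register_governor(&__governor);      \
    }                                                       \
    core_initcall(__governor##_init)

    #define cpufreq_governor_exit(__governor)               \
    static void __exit __governor##_exit(void)              \
    {                                                       \
        cpufreq_unregister_governor(&__governor);           \
    }                                                       \
    module_exit(__governor##_exit)
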
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e8370e64a155cf27321d6727011d2e5883e4483f..21b409195b463cf1e154e88acf0cda171b3192ed 100644
 
 #define MSR_LBR_SELECT                 0x000001c8
 #define MSR_LBR_TOS                    0x000001c9
+
+#define MSR_IA32_POWER_CTL             0x000001fc
+#define MSR_IA32_POWER_CTL_BIT_EE      19
+
 #define MSR_LBR_NHM_FROM               0x00000680
 #define MSR_LBR_NHM_TO                 0x000006c0
 #define MSR_LBR_CORE_FROM              0x00000040
 
 #define MSR_PEBS_FRONTEND              0x000003f7
 
-#define MSR_IA32_POWER_CTL             0x000001fc
-
 #define MSR_IA32_MC0_CTL               0x00000400
 #define MSR_IA32_MC0_STATUS            0x00000401
 #define MSR_IA32_MC0_ADDR              0x00000402
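
MSR_IA32_POWER_CTL_BIT_EE names the "energy efficiency optimization disable"
bit that intel_pstate's new energy_efficiency attribute flips. A hedged
sketch of the read-modify-write involved (the helper name is illustrative)::

    #include <linux/bits.h>
    #include <asm/msr.h>

    static void foo_set_energy_efficiency(bool enable)
    {
        u64 power_ctl;

        rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
        if (enable)
            power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);  /* clear = EE on */
        else
            power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);   /* set = EE off */
        wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
    }
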
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 75534c5b543331930d8ac1c1fc0d3c18ae4b5ee8..0c65a09aec1b04705bc04780d9e466ac242e3663 100644
@@ -655,8 +655,8 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
        return index;
 }
 
-static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
-                                  struct cpuidle_driver *drv, int index)
+static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
+                                 struct cpuidle_driver *drv, int index)
 {
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
@@ -664,16 +664,18 @@ static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
                struct acpi_processor *pr = __this_cpu_read(processors);
 
                if (unlikely(!pr))
-                       return;
+                       return 0;
 
                if (pr->flags.bm_check) {
                        acpi_idle_enter_bm(pr, cx, false);
-                       return;
+                       return 0;
                } else {
                        ACPI_FLUSH_CPU_CACHE();
                }
        }
        acpi_idle_do_entry(cx);
+
+       return 0;
 }
 
 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
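
The void-to-int change matches the updated ->enter_s2idle() prototype in
struct cpuidle_state, which now returns int just like ->enter(); an abridged
sketch of the relevant callbacks from include/linux/cpuidle.h::

    struct cpuidle_state {
        /* ... */
        int (*enter)(struct cpuidle_device *dev,
                     struct cpuidle_driver *drv, int index);
        int (*enter_s2idle)(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index);
        /* ... */
    };
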
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0a01df60884944b4753265cecbd3986d93acd660..2cb5e04cf86cd313aabdc3b275ec0db988e0c2a5 100644
@@ -263,18 +263,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
        /*
         * Traverse all sub-domains within the domain. This can be
         * done without any additional locking as the link->performance_state
-        * field is protected by the master genpd->lock, which is already taken.
+        * field is protected by the parent genpd->lock, which is already taken.
         *
         * Also note that link->performance_state (subdomain's performance state
-        * requirement to master domain) is different from
-        * link->slave->performance_state (current performance state requirement
+        * requirement to parent domain) is different from
+        * link->child->performance_state (current performance state requirement
         * of the devices/sub-domains of the subdomain) and so can have a
         * different value.
         *
         * Note that we also take vote from powered-off sub-domains into account
         * as the same is done for devices right now.
         */
-       list_for_each_entry(link, &genpd->master_links, master_node) {
+       list_for_each_entry(link, &genpd->parent_links, parent_node) {
                if (link->performance_state > state)
                        state = link->performance_state;
        }
@@ -285,40 +285,40 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
                                        unsigned int state, int depth)
 {
-       struct generic_pm_domain *master;
+       struct generic_pm_domain *parent;
        struct gpd_link *link;
-       int master_state, ret;
+       int parent_state, ret;
 
        if (state == genpd->performance_state)
                return 0;
 
-       /* Propagate to masters of genpd */
-       list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               master = link->master;
+       /* Propagate to parents of genpd */
+       list_for_each_entry(link, &genpd->child_links, child_node) {
+               parent = link->parent;
 
-               if (!master->set_performance_state)
+               if (!parent->set_performance_state)
                        continue;
 
-               /* Find master's performance state */
+               /* Find parent's performance state */
                ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
-                                                        master->opp_table,
+                                                        parent->opp_table,
                                                         state);
                if (unlikely(ret < 0))
                        goto err;
 
-               master_state = ret;
+               parent_state = ret;
 
-               genpd_lock_nested(master, depth + 1);
+               genpd_lock_nested(parent, depth + 1);
 
                link->prev_performance_state = link->performance_state;
-               link->performance_state = master_state;
-               master_state = _genpd_reeval_performance_state(master,
-                                               master_state);
-               ret = _genpd_set_performance_state(master, master_state, depth + 1);
+               link->performance_state = parent_state;
+               parent_state = _genpd_reeval_performance_state(parent,
+                                               parent_state);
+               ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
                if (ret)
                        link->performance_state = link->prev_performance_state;
 
-               genpd_unlock(master);
+               genpd_unlock(parent);
 
                if (ret)
                        goto err;
@@ -333,26 +333,26 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
 
 err:
        /* Encountered an error, lets rollback */
-       list_for_each_entry_continue_reverse(link, &genpd->slave_links,
-                                            slave_node) {
-               master = link->master;
+       list_for_each_entry_continue_reverse(link, &genpd->child_links,
+                                            child_node) {
+               parent = link->parent;
 
-               if (!master->set_performance_state)
+               if (!parent->set_performance_state)
                        continue;
 
-               genpd_lock_nested(master, depth + 1);
+               genpd_lock_nested(parent, depth + 1);
 
-               master_state = link->prev_performance_state;
-               link->performance_state = master_state;
+               parent_state = link->prev_performance_state;
+               link->performance_state = parent_state;
 
-               master_state = _genpd_reeval_performance_state(master,
-                                               master_state);
-               if (_genpd_set_performance_state(master, master_state, depth + 1)) {
+               parent_state = _genpd_reeval_performance_state(parent,
+                                               parent_state);
+               if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
                        pr_err("%s: Failed to roll back to %d performance state\n",
-                              master->name, master_state);
+                              parent->name, parent_state);
                }
 
-               genpd_unlock(master);
+               genpd_unlock(parent);
        }
 
        return ret;
@@ -552,7 +552,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
 
                /*
                 * If sd_count > 0 at this point, one of the subdomains hasn't
-                * managed to call genpd_power_on() for the master yet after
+                * managed to call genpd_power_on() for the parent yet after
                 * incrementing it.  In that case genpd_power_on() will wait
                 * for us to drop the lock, so we can call .power_off() and let
                 * the genpd_power_on() restore power for us (this shouldn't
@@ -566,22 +566,22 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
        genpd->status = GPD_STATE_POWER_OFF;
        genpd_update_accounting(genpd);
 
-       list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               genpd_sd_counter_dec(link->master);
-               genpd_lock_nested(link->master, depth + 1);
-               genpd_power_off(link->master, false, depth + 1);
-               genpd_unlock(link->master);
+       list_for_each_entry(link, &genpd->child_links, child_node) {
+               genpd_sd_counter_dec(link->parent);
+               genpd_lock_nested(link->parent, depth + 1);
+               genpd_power_off(link->parent, false, depth + 1);
+               genpd_unlock(link->parent);
        }
 
        return 0;
 }
 
 /**
- * genpd_power_on - Restore power to a given PM domain and its masters.
+ * genpd_power_on - Restore power to a given PM domain and its parents.
  * @genpd: PM domain to power up.
  * @depth: nesting count for lockdep.
  *
- * Restore power to @genpd and all of its masters so that it is possible to
+ * Restore power to @genpd and all of its parents so that it is possible to
  * resume a device belonging to it.
  */
 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
@@ -594,20 +594,20 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 
        /*
         * The list is guaranteed not to change while the loop below is being
-        * executed, unless one of the masters' .power_on() callbacks fiddles
+        * executed, unless one of the parents' .power_on() callbacks fiddles
         * with it.
         */
-       list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               struct generic_pm_domain *master = link->master;
+       list_for_each_entry(link, &genpd->child_links, child_node) {
+               struct generic_pm_domain *parent = link->parent;
 
-               genpd_sd_counter_inc(master);
+               genpd_sd_counter_inc(parent);
 
-               genpd_lock_nested(master, depth + 1);
-               ret = genpd_power_on(master, depth + 1);
-               genpd_unlock(master);
+               genpd_lock_nested(parent, depth + 1);
+               ret = genpd_power_on(parent, depth + 1);
+               genpd_unlock(parent);
 
                if (ret) {
-                       genpd_sd_counter_dec(master);
+                       genpd_sd_counter_dec(parent);
                        goto err;
                }
        }
@@ -623,12 +623,12 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 
  err:
        list_for_each_entry_continue_reverse(link,
-                                       &genpd->slave_links,
-                                       slave_node) {
-               genpd_sd_counter_dec(link->master);
-               genpd_lock_nested(link->master, depth + 1);
-               genpd_power_off(link->master, false, depth + 1);
-               genpd_unlock(link->master);
+                                       &genpd->child_links,
+                                       child_node) {
+               genpd_sd_counter_dec(link->parent);
+               genpd_lock_nested(link->parent, depth + 1);
+               genpd_power_off(link->parent, false, depth + 1);
+               genpd_unlock(link->parent);
        }
 
        return ret;
@@ -932,13 +932,13 @@ late_initcall(genpd_power_off_unused);
 #ifdef CONFIG_PM_SLEEP
 
 /**
- * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
+ * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
  * @genpd: PM domain to power off, if possible.
  * @use_lock: use the lock.
  * @depth: nesting count for lockdep.
  *
  * Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so.  Also, in that case propagate to its masters.
+ * hibernation) and do that if so.  Also, in that case propagate to its parents.
  *
  * This function is only called in "noirq" and "syscore" stages of system power
  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
@@ -963,21 +963,21 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
 
        genpd->status = GPD_STATE_POWER_OFF;
 
-       list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               genpd_sd_counter_dec(link->master);
+       list_for_each_entry(link, &genpd->child_links, child_node) {
+               genpd_sd_counter_dec(link->parent);
 
                if (use_lock)
-                       genpd_lock_nested(link->master, depth + 1);
+                       genpd_lock_nested(link->parent, depth + 1);
 
-               genpd_sync_power_off(link->master, use_lock, depth + 1);
+               genpd_sync_power_off(link->parent, use_lock, depth + 1);
 
                if (use_lock)
-                       genpd_unlock(link->master);
+                       genpd_unlock(link->parent);
        }
 }
 
 /**
- * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
+ * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
  * @genpd: PM domain to power on.
  * @use_lock: use the lock.
  * @depth: nesting count for lockdep.
@@ -994,16 +994,16 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
        if (genpd_status_on(genpd))
                return;
 
-       list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               genpd_sd_counter_inc(link->master);
+       list_for_each_entry(link, &genpd->child_links, child_node) {
+               genpd_sd_counter_inc(link->parent);
 
                if (use_lock)
-                       genpd_lock_nested(link->master, depth + 1);
+                       genpd_lock_nested(link->parent, depth + 1);
 
-               genpd_sync_power_on(link->master, use_lock, depth + 1);
+               genpd_sync_power_on(link->parent, use_lock, depth + 1);
 
                if (use_lock)
-                       genpd_unlock(link->master);
+                       genpd_unlock(link->parent);
        }
 
        _genpd_power_on(genpd, false);
@@ -1443,12 +1443,12 @@ static void genpd_update_cpumask(struct generic_pm_domain *genpd,
        if (!genpd_is_cpu_domain(genpd))
                return;
 
-       list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               struct generic_pm_domain *master = link->master;
+       list_for_each_entry(link, &genpd->child_links, child_node) {
+               struct generic_pm_domain *parent = link->parent;
 
-               genpd_lock_nested(master, depth + 1);
-               genpd_update_cpumask(master, cpu, set, depth + 1);
-               genpd_unlock(master);
+               genpd_lock_nested(parent, depth + 1);
+               genpd_update_cpumask(parent, cpu, set, depth + 1);
+               genpd_unlock(parent);
        }
 
        if (set)
@@ -1636,17 +1636,17 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
                goto out;
        }
 
-       list_for_each_entry(itr, &genpd->master_links, master_node) {
-               if (itr->slave == subdomain && itr->master == genpd) {
+       list_for_each_entry(itr, &genpd->parent_links, parent_node) {
+               if (itr->child == subdomain && itr->parent == genpd) {
                        ret = -EINVAL;
                        goto out;
                }
        }
 
-       link->master = genpd;
-       list_add_tail(&link->master_node, &genpd->master_links);
-       link->slave = subdomain;
-       list_add_tail(&link->slave_node, &subdomain->slave_links);
+       link->parent = genpd;
+       list_add_tail(&link->parent_node, &genpd->parent_links);
+       link->child = subdomain;
+       list_add_tail(&link->child_node, &subdomain->child_links);
        if (genpd_status_on(subdomain))
                genpd_sd_counter_inc(genpd);
 
@@ -1660,7 +1660,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
 
 /**
  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
- * @genpd: Master PM domain to add the subdomain to.
+ * @genpd: Leader PM domain to add the subdomain to.
  * @subdomain: Subdomain to be added.
  */
 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
@@ -1678,7 +1678,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
 
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
- * @genpd: Master PM domain to remove the subdomain from.
+ * @genpd: Leader PM domain to remove the subdomain from.
  * @subdomain: Subdomain to be removed.
  */
 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
@@ -1693,19 +1693,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
        genpd_lock(subdomain);
        genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
 
-       if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+       if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
                pr_warn("%s: unable to remove subdomain %s\n",
                        genpd->name, subdomain->name);
                ret = -EBUSY;
                goto out;
        }
 
-       list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
-               if (link->slave != subdomain)
+       list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
+               if (link->child != subdomain)
                        continue;
 
-               list_del(&link->master_node);
-               list_del(&link->slave_node);
+               list_del(&link->parent_node);
+               list_del(&link->child_node);
                kfree(link);
                if (genpd_status_on(subdomain))
                        genpd_sd_counter_dec(genpd);
@@ -1770,8 +1770,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
        if (IS_ERR_OR_NULL(genpd))
                return -EINVAL;
 
-       INIT_LIST_HEAD(&genpd->master_links);
-       INIT_LIST_HEAD(&genpd->slave_links);
+       INIT_LIST_HEAD(&genpd->parent_links);
+       INIT_LIST_HEAD(&genpd->child_links);
        INIT_LIST_HEAD(&genpd->dev_list);
        genpd_lock_init(genpd);
        genpd->gov = gov;
@@ -1848,15 +1848,15 @@ static int genpd_remove(struct generic_pm_domain *genpd)
                return -EBUSY;
        }
 
-       if (!list_empty(&genpd->master_links) || genpd->device_count) {
+       if (!list_empty(&genpd->parent_links) || genpd->device_count) {
                genpd_unlock(genpd);
                pr_err("%s: unable to remove %s\n", __func__, genpd->name);
                return -EBUSY;
        }
 
-       list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
-               list_del(&link->master_node);
-               list_del(&link->slave_node);
+       list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
+               list_del(&link->parent_node);
+               list_del(&link->child_node);
                kfree(link);
        }
 
@@ -2827,12 +2827,12 @@ static int genpd_summary_one(struct seq_file *s,
 
        /*
         * Modifications on the list require holding locks on both
-        * master and slave, so we are safe.
+        * parent and child, so we are safe.
         * Also genpd->name is immutable.
         */
-       list_for_each_entry(link, &genpd->master_links, master_node) {
-               seq_printf(s, "%s", link->slave->name);
-               if (!list_is_last(&link->master_node, &genpd->master_links))
+       list_for_each_entry(link, &genpd->parent_links, parent_node) {
+               seq_printf(s, "%s", link->child->name);
+               if (!list_is_last(&link->parent_node, &genpd->parent_links))
                        seq_puts(s, ", ");
        }
 
@@ -2860,7 +2860,7 @@ static int summary_show(struct seq_file *s, void *data)
        struct generic_pm_domain *genpd;
        int ret = 0;
 
-       seq_puts(s, "domain                          status          slaves\n");
+       seq_puts(s, "domain                          status          children\n");
        seq_puts(s, "    /device                                             runtime status\n");
        seq_puts(s, "----------------------------------------------------------------------\n");
 
@@ -2915,8 +2915,8 @@ static int sub_domains_show(struct seq_file *s, void *data)
        if (ret)
                return -ERESTARTSYS;
 
-       list_for_each_entry(link, &genpd->master_links, master_node)
-               seq_printf(s, "%s\n", link->slave->name);
+       list_for_each_entry(link, &genpd->parent_links, parent_node)
+               seq_printf(s, "%s\n", link->child->name);
 
        genpd_unlock(genpd);
        return ret;
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index daa8c7689f7e6251052c0742cc943c6ece64a3e0..490ed7deb99a790691324e83b18fed02fd25a68e 100644
@@ -135,8 +135,8 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
         *
         * All subdomains have been powered off already at this point.
         */
-       list_for_each_entry(link, &genpd->master_links, master_node) {
-               struct generic_pm_domain *sd = link->slave;
+       list_for_each_entry(link, &genpd->parent_links, parent_node) {
+               struct generic_pm_domain *sd = link->child;
                s64 sd_max_off_ns = sd->max_off_time_ns;
 
                if (sd_max_off_ns < 0)
@@ -217,13 +217,13 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
        }
 
        /*
-        * We have to invalidate the cached results for the masters, so
+        * We have to invalidate the cached results for the parents, so
         * use the observation that default_power_down_ok() is not
-        * going to be called for any master until this instance
+        * going to be called for any parent until this instance
         * returns.
         */
-       list_for_each_entry(link, &genpd->slave_links, slave_node)
-               link->master->max_off_time_changed = true;
+       list_for_each_entry(link, &genpd->child_links, child_node)
+               link->parent->max_off_time_changed = true;
 
        genpd->max_off_time_ns = -1;
        genpd->max_off_time_changed = false;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 24d25cf8ab14870d3e883e39b3201c86cb6974f6..c7b24812523c9e3c1bbba8f4b3f0115c5b1c478e 100644
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* sysfs entries for device PM */
 #include <linux/device.h>
+#include <linux/kobject.h>
 #include <linux/string.h>
 #include <linux/export.h>
 #include <linux/pm_qos.h>
@@ -739,12 +740,18 @@ int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
 
 int wakeup_sysfs_add(struct device *dev)
 {
-       return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+       int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+
+       if (!ret)
+               kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
+       return ret;
 }
 
 void wakeup_sysfs_remove(struct device *dev)
 {
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+       kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 }
 
 int pm_qos_sysfs_add_resume_latency(struct device *dev)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 429e5a36c08a9a0dd41184e2d3f09f73a50edfc8..e4ff681faaaaae089d9b847d2c0a6f125c49d1ca 100644
@@ -244,7 +244,7 @@ static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
 
 static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
 {
-       u32 val, dummy;
+       u32 val, dummy __always_unused;
 
        rdmsr(MSR_IA32_PERF_CTL, val, dummy);
        return val;
@@ -261,7 +261,7 @@ static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
 
 static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
 {
-       u32 val, dummy;
+       u32 val, dummy __always_unused;
 
        rdmsr(MSR_AMD_PERF_CTL, val, dummy);
        return val;
@@ -612,7 +612,7 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 {
        /* Intel Xeon Processor 7100 Series Specification Update
-        * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+        * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
         * AL30: A Machine Check Exception (MCE) Occurring during an
         * Enhanced Intel SpeedStep Technology Ratio Change May Cause
         * Both Processor Cores to Lock Up. */
@@ -993,14 +993,14 @@ MODULE_PARM_DESC(acpi_pstate_strict,
 late_initcall(acpi_cpufreq_init);
 module_exit(acpi_cpufreq_exit);
 
-static const struct x86_cpu_id acpi_cpufreq_ids[] = {
+static const struct x86_cpu_id __maybe_unused acpi_cpufreq_ids[] = {
        X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
        X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
 
-static const struct acpi_device_id processor_device_ids[] = {
+static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
        {ACPI_PROCESSOR_OBJECT_HID, },
        {ACPI_PROCESSOR_DEVICE_HID, },
        {},
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index f7c4206d4c90b1d50821929b045daa4649e3338b..d0b10baf039ab3b54b2b5dc488be9d68aa9ceeaf 100644
@@ -144,7 +144,7 @@ static void __exit amd_freq_sensitivity_exit(void)
 }
 module_exit(amd_freq_sensitivity_exit);
 
-static const struct x86_cpu_id amd_freq_sensitivity_ids[] = {
+static const struct x86_cpu_id __maybe_unused amd_freq_sensitivity_ids[] = {
        X86_MATCH_FEATURE(X86_FEATURE_PROC_FEEDBACK, NULL),
        {}
 };
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 79742bbd221f0ca82e247552f749a705eff61d95..944d7b45afe9a9cbdb38b48ca4c3bb90e5780121 100644
@@ -279,7 +279,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency = transition_latency;
        policy->dvfs_possible_from_any_cpu = true;
 
-       dev_pm_opp_of_register_em(policy->cpus);
+       dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
 
        return 0;
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0128de3603dfc8ac59c4435c44fd5ca255b582a7..17c1c3becd925008444315ee18f77584814e2111 100644
@@ -50,7 +50,9 @@ static LIST_HEAD(cpufreq_governor_list);
 #define for_each_governor(__governor)                          \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
 
-/**
+static char default_governor[CPUFREQ_NAME_LEN];
+
+/*
  * The "cpufreq driver" - the arch- or hardware-dependent low
  * level driver of CPUFreq support, and its spinlock. This lock
  * also protects the cpufreq_cpu_data array.
@@ -78,7 +80,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
                              struct cpufreq_governor *new_gov,
                              unsigned int new_pol);
 
-/**
+/*
  * Two notifier lists: the "policy" list is involved in the
  * validation process for a new CPU frequency policy; the
  * "transition" list for kernel code that needs to handle
@@ -298,7 +300,7 @@ struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
  *********************************************************************/
 
-/**
+/*
  * adjust_jiffies - adjust the system "loops_per_jiffy"
  *
  * This function alters the system "loops_per_jiffy" for the clock
@@ -524,6 +526,7 @@ EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
 /**
  * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
  * one.
+ * @policy: associated policy to interrogate
  * @target_freq: target frequency to resolve.
  *
  * The target to driver frequency mapping is cached in the policy.
@@ -621,6 +624,24 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
        return NULL;
 }
 
+static struct cpufreq_governor *get_governor(const char *str_governor)
+{
+       struct cpufreq_governor *t;
+
+       mutex_lock(&cpufreq_governor_mutex);
+       t = find_governor(str_governor);
+       if (!t)
+               goto unlock;
+
+       if (!try_module_get(t->owner))
+               t = NULL;
+
+unlock:
+       mutex_unlock(&cpufreq_governor_mutex);
+
+       return t;
+}
+
 static unsigned int cpufreq_parse_policy(char *str_governor)
 {
        if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
@@ -640,31 +661,17 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
 {
        struct cpufreq_governor *t;
 
-       mutex_lock(&cpufreq_governor_mutex);
-
-       t = find_governor(str_governor);
-       if (!t) {
-               int ret;
-
-               mutex_unlock(&cpufreq_governor_mutex);
-
-               ret = request_module("cpufreq_%s", str_governor);
-               if (ret)
-                       return NULL;
+       t = get_governor(str_governor);
+       if (t)
+               return t;
 
-               mutex_lock(&cpufreq_governor_mutex);
-
-               t = find_governor(str_governor);
-       }
-       if (t && !try_module_get(t->owner))
-               t = NULL;
-
-       mutex_unlock(&cpufreq_governor_mutex);
+       if (request_module("cpufreq_%s", str_governor))
+               return NULL;
 
-       return t;
+       return get_governor(str_governor);
 }
 
-/**
+/*
  * cpufreq_per_cpu_attr_read() / show_##file_name() -
  * print out cpufreq information
  *
@@ -706,7 +713,7 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
        return ret;
 }
 
-/**
+/*
  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
  */
 #define store_one(file_name, object)                   \
@@ -727,7 +734,7 @@ static ssize_t store_##file_name                                    \
 store_one(scaling_min_freq, min);
 store_one(scaling_max_freq, max);
 
-/**
+/*
  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
  */
 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
@@ -741,7 +748,7 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
        return sprintf(buf, "<unknown>\n");
 }
 
-/**
+/*
  * show_scaling_governor - show the current policy for the specified CPU
  */
 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
@@ -756,7 +763,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
        return -EINVAL;
 }
 
-/**
+/*
  * store_scaling_governor - store policy for the specified CPU
  */
 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
@@ -793,7 +800,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
        return ret ? ret : count;
 }
 
-/**
+/*
  * show_scaling_driver - show the cpufreq driver currently loaded
  */
 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
@@ -801,7 +808,7 @@ static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
 }
 
-/**
+/*
  * show_scaling_available_governors - show the available CPUfreq governors
  */
 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
@@ -815,12 +822,14 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                goto out;
        }
 
+       mutex_lock(&cpufreq_governor_mutex);
        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
-                       goto out;
+                       break;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
+       mutex_unlock(&cpufreq_governor_mutex);
 out:
        i += sprintf(&buf[i], "\n");
        return i;
@@ -843,7 +852,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
 }
 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
 
-/**
+/*
  * show_related_cpus - show the CPUs affected by each transition even if
  * hw coordination is in use
  */
@@ -852,7 +861,7 @@ static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
        return cpufreq_show_cpus(policy->related_cpus, buf);
 }
 
-/**
+/*
  * show_affected_cpus - show the CPUs affected by each transition
  */
 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
@@ -886,7 +895,7 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
        return policy->governor->show_setspeed(policy, buf);
 }
 
-/**
+/*
  * show_bios_limit - show the current cpufreq HW/BIOS limitation
  */
 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
@@ -1048,36 +1057,36 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
        return 0;
 }
 
-__weak struct cpufreq_governor *cpufreq_default_governor(void)
-{
-       return NULL;
-}
-
 static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
-       struct cpufreq_governor *def_gov = cpufreq_default_governor();
        struct cpufreq_governor *gov = NULL;
        unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
+       int ret;
 
        if (has_target()) {
                /* Update policy governor to the one used before hotplug. */
-               gov = find_governor(policy->last_governor);
+               gov = get_governor(policy->last_governor);
                if (gov) {
                        pr_debug("Restoring governor %s for cpu %d\n",
-                                policy->governor->name, policy->cpu);
-               } else if (def_gov) {
-                       gov = def_gov;
+                                gov->name, policy->cpu);
                } else {
-                       return -ENODATA;
+                       gov = get_governor(default_governor);
+               }
+
+               if (!gov) {
+                       gov = cpufreq_default_governor();
+                       __module_get(gov->owner);
                }
+
        } else {
+
                /* Use the default policy if there is no last_policy. */
                if (policy->last_policy) {
                        pol = policy->last_policy;
-               } else if (def_gov) {
-                       pol = cpufreq_parse_policy(def_gov->name);
+               } else {
+                       pol = cpufreq_parse_policy(default_governor);
                        /*
-                        * In case the default governor is neiter "performance"
+                        * In case the default governor is neither "performance"
                         * nor "powersave", fall back to the initial policy
                         * value set by the driver.
                         */
@@ -1089,7 +1098,11 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
                        return -ENODATA;
        }
 
-       return cpufreq_set_policy(policy, gov, pol);
+       ret = cpufreq_set_policy(policy, gov, pol);
+       if (gov)
+               module_put(gov->owner);
+
+       return ret;
 }
 
 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
@@ -1604,7 +1617,7 @@ unlock:
        return 0;
 }
 
-/**
+/*
  * cpufreq_remove_dev - remove a CPU device
  *
  * Removes the cpufreq interface for a CPU device.
@@ -2361,6 +2374,7 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
  * cpufreq_get_policy - get the current cpufreq_policy
  * @policy: struct cpufreq_policy into which the current cpufreq_policy
  *     is written
+ * @cpu: CPU to find the policy for
  *
  * Reads the current cpufreq policy.
  */
@@ -2747,7 +2761,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
 
-/**
+/*
  * cpufreq_unregister_driver - unregister the current CPUFreq driver
  *
  * Unregister the current CPUFreq driver. Only call this if you have
@@ -2783,13 +2797,19 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
 static int __init cpufreq_core_init(void)
 {
+       struct cpufreq_governor *gov = cpufreq_default_governor();
+
        if (cpufreq_disabled())
                return -ENODEV;
 
        cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
        BUG_ON(!cpufreq_global_kobject);
 
+       if (!strlen(default_governor))
+               strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
+
        return 0;
 }
 module_param(off, int, 0444);
+module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
 core_initcall(cpufreq_core_init);
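
Taken together, governor selection at policy initialization now falls back in
three steps. A hedged paraphrase of the logic above (the helper below is
illustrative, not part of the patch)::

    static struct cpufreq_governor *pick_governor(struct cpufreq_policy *policy)
    {
        struct cpufreq_governor *gov;

        /* 1. Governor used before CPU hotplug, if any. */
        gov = get_governor(policy->last_governor);
        if (!gov)
            /* 2. cpufreq.default_governor= from the command line. */
            gov = get_governor(default_governor);
        if (!gov) {
            /* 3. Built-in default chosen by the kernel configuration. */
            gov = cpufreq_default_governor();
            __module_get(gov->owner);
        }
        return gov;
    }
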
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 737ff3b9c2c0983736511d685922737c4dae324b..aa39ff31ec9f76ec0f83b861c55c91af0e105c57 100644
@@ -322,17 +322,7 @@ static struct dbs_governor cs_governor = {
        .start = cs_start,
 };
 
-#define CPU_FREQ_GOV_CONSERVATIVE      (&cs_governor.gov)
-
-static int __init cpufreq_gov_dbs_init(void)
-{
-       return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
-}
-
-static void __exit cpufreq_gov_dbs_exit(void)
-{
-       cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
-}
+#define CPU_FREQ_GOV_CONSERVATIVE      (cs_governor.gov)
 
 MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
 MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
@@ -343,11 +333,9 @@ MODULE_LICENSE("GPL");
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
 struct cpufreq_governor *cpufreq_default_governor(void)
 {
-       return CPU_FREQ_GOV_CONSERVATIVE;
+       return &CPU_FREQ_GOV_CONSERVATIVE;
 }
-
-core_initcall(cpufreq_gov_dbs_init);
-#else
-module_init(cpufreq_gov_dbs_init);
 #endif
-module_exit(cpufreq_gov_dbs_exit);
+
+cpufreq_governor_init(CPU_FREQ_GOV_CONSERVATIVE);
+cpufreq_governor_exit(CPU_FREQ_GOV_CONSERVATIVE);
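
The init/exit boilerplate deleted above (and from the other governors below) is replaced by the cpufreq_governor_init()/cpufreq_governor_exit() helpers. Their definition is not part of these hunks (include/linux/cpufreq.h is in the file list); a plausible expansion, inferred only from how the call sites use them, is:

	#define cpufreq_governor_init(__governor)			\
	static int __init __governor##_init(void)			\
	{								\
		return cpufreq_register_governor(&__governor);		\
	}								\
	core_initcall(__governor##_init)

	#define cpufreq_governor_exit(__governor)			\
	static void __exit __governor##_exit(void)			\
	{								\
		cpufreq_unregister_governor(&__governor);		\
	}								\
	module_exit(__governor##_exit)

This would also explain why CPU_FREQ_GOV_CONSERVATIVE (and CPU_FREQ_GOV_ONDEMAND below) drops the address-of operator: the macros need the object's name to paste the ##_init/##_exit suffixes onto, and take the address themselves.
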
index f99ae45efaea764dc9c46c4435fa0dbf74486324..63f7c219062b9e711ef93a5c284da59bc8b16e3b 100644 (file)
@@ -26,7 +26,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
 static DEFINE_MUTEX(gov_dbs_data_mutex);
 
 /* Common sysfs tunables */
-/**
+/*
 * store_sampling_rate - update the sampling rate, effective immediately if needed.
  *
  * If new rate is smaller than the old, simply updating
index 82a4d37ddecb3f79de5e75093006eb7ac5817bad..ac361a8b1d3bb4cafe3e68e74445e91032c8498f 100644 (file)
@@ -408,7 +408,7 @@ static struct dbs_governor od_dbs_gov = {
        .start = od_start,
 };
 
-#define CPU_FREQ_GOV_ONDEMAND  (&od_dbs_gov.gov)
+#define CPU_FREQ_GOV_ONDEMAND  (od_dbs_gov.gov)
 
 static void od_set_powersave_bias(unsigned int powersave_bias)
 {
@@ -429,7 +429,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
                        continue;
 
                policy = cpufreq_cpu_get_raw(cpu);
-               if (!policy || policy->governor != CPU_FREQ_GOV_ONDEMAND)
+               if (!policy || policy->governor != &CPU_FREQ_GOV_ONDEMAND)
                        continue;
 
                policy_dbs = policy->governor_data;
@@ -461,16 +461,6 @@ void od_unregister_powersave_bias_handler(void)
 }
 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
 
-static int __init cpufreq_gov_dbs_init(void)
-{
-       return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND);
-}
-
-static void __exit cpufreq_gov_dbs_exit(void)
-{
-       cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND);
-}
-
 MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
 MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
 MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
@@ -480,11 +470,9 @@ MODULE_LICENSE("GPL");
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
 struct cpufreq_governor *cpufreq_default_governor(void)
 {
-       return CPU_FREQ_GOV_ONDEMAND;
+       return &CPU_FREQ_GOV_ONDEMAND;
 }
-
-core_initcall(cpufreq_gov_dbs_init);
-#else
-module_init(cpufreq_gov_dbs_init);
 #endif
-module_exit(cpufreq_gov_dbs_exit);
+
+cpufreq_governor_init(CPU_FREQ_GOV_ONDEMAND);
+cpufreq_governor_exit(CPU_FREQ_GOV_ONDEMAND);
index def9afe0f5b863e20a4907aa19cd8c2f60af26d4..71c1d9aba77271957dbb3f4bccacbba9c20674d3 100644 (file)
@@ -23,16 +23,6 @@ static struct cpufreq_governor cpufreq_gov_performance = {
        .limits         = cpufreq_gov_performance_limits,
 };
 
-static int __init cpufreq_gov_performance_init(void)
-{
-       return cpufreq_register_governor(&cpufreq_gov_performance);
-}
-
-static void __exit cpufreq_gov_performance_exit(void)
-{
-       cpufreq_unregister_governor(&cpufreq_gov_performance);
-}
-
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
 struct cpufreq_governor *cpufreq_default_governor(void)
 {
@@ -50,5 +40,5 @@ MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
 MODULE_LICENSE("GPL");
 
-core_initcall(cpufreq_gov_performance_init);
-module_exit(cpufreq_gov_performance_exit);
+cpufreq_governor_init(cpufreq_gov_performance);
+cpufreq_governor_exit(cpufreq_gov_performance);
index 1ae66019eb8357ac23ad13deb6d718c7bf25b9f6..7749522355b5946914ac8099d58789a75729c1a2 100644 (file)
@@ -23,16 +23,6 @@ static struct cpufreq_governor cpufreq_gov_powersave = {
        .owner          = THIS_MODULE,
 };
 
-static int __init cpufreq_gov_powersave_init(void)
-{
-       return cpufreq_register_governor(&cpufreq_gov_powersave);
-}
-
-static void __exit cpufreq_gov_powersave_exit(void)
-{
-       cpufreq_unregister_governor(&cpufreq_gov_powersave);
-}
-
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
 MODULE_LICENSE("GPL");
@@ -42,9 +32,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 {
        return &cpufreq_gov_powersave;
 }
-
-core_initcall(cpufreq_gov_powersave_init);
-#else
-module_init(cpufreq_gov_powersave_init);
 #endif
-module_exit(cpufreq_gov_powersave_exit);
+
+cpufreq_governor_init(cpufreq_gov_powersave);
+cpufreq_governor_exit(cpufreq_gov_powersave);
index b43e7cd502c587f3db94d2d07546f377965d5b7c..50a4d784658009112d1d160e8b3bc8d9d9f2de0d 100644 (file)
@@ -126,16 +126,6 @@ static struct cpufreq_governor cpufreq_gov_userspace = {
        .owner          = THIS_MODULE,
 };
 
-static int __init cpufreq_gov_userspace_init(void)
-{
-       return cpufreq_register_governor(&cpufreq_gov_userspace);
-}
-
-static void __exit cpufreq_gov_userspace_exit(void)
-{
-       cpufreq_unregister_governor(&cpufreq_gov_userspace);
-}
-
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
                "Russell King <rmk@arm.linux.org.uk>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
@@ -146,9 +136,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 {
        return &cpufreq_gov_userspace;
 }
-
-core_initcall(cpufreq_gov_userspace_init);
-#else
-module_init(cpufreq_gov_userspace_init);
 #endif
-module_exit(cpufreq_gov_userspace_exit);
+
+cpufreq_governor_init(cpufreq_gov_userspace);
+cpufreq_governor_exit(cpufreq_gov_userspace);
index 297d23cad8b5f0110f2a43b1ade1e9174d454616..91f477a6cbc460305f125383a1bfa3a0b239e2f0 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * CPU frequency scaling for DaVinci
  *
- * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/
  *
  * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
  *
index e117b0059123ec3d1a74e3a039bc0335c3f30f4a..f839dc9852c08d05b826d06c6b4b71813172815a 100644 (file)
@@ -221,7 +221,7 @@ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
 
-/**
+/*
  * show_available_freqs - show available frequencies for the specified CPU
  */
 static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
@@ -260,7 +260,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
 struct freq_attr cpufreq_freq_attr_##_name##_freqs =     \
 __ATTR_RO(_name##_frequencies)
 
-/**
+/*
  * show_scaling_available_frequencies - show available normal frequencies for
  * the specified CPU
  */
@@ -272,7 +272,7 @@ static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
 cpufreq_attr_available_freq(scaling_available);
 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
 
-/**
+/*
  * show_available_boost_freqs - show available boost frequencies for
  * the specified CPU
  */
index fdb2ffffbd15a5706e2ae034ff8c9a6113b3d3e3..ef7b34c1fd2ba1b7bc9915aa34fdd84a6a1ebd4a 100644 (file)
@@ -193,7 +193,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
        policy->clk = clks[ARM].clk;
        cpufreq_generic_init(policy, freq_table, transition_latency);
        policy->suspend_freq = max_freq;
-       dev_pm_opp_of_register_em(policy->cpus);
+       dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
 
        return 0;
 }
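
This is the first of several drivers in this merge (mediatek, omap, qcom, scpi and vexpress-spc below) that now pass the consumer device into the energy-model registration. Together with the scmi switch from em_register_perf_domain() to em_dev_register_perf_domain(), the call sites imply prototypes along these lines (a sketch; the actual declarations live in include/linux/energy_model.h and include/linux/pm_opp.h from the file list):

	int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
					struct em_data_callback *cb, cpumask_t *cpus);
	int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
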
index 7e0f7880b21a66f5424d0e2e91ac6c8d54088320..7f5d819314834a9bfebb2f706f389c8e88a5fbd5 100644 (file)
@@ -201,9 +201,7 @@ struct global_params {
  * @pstate:            Stores P state limits for this CPU
  * @vid:               Stores VID limits for this CPU
  * @last_sample_time:  Last Sample time
- * @aperf_mperf_shift: Number of clock cycles after aperf, merf is incremented
- *                     This shift is a multiplier to mperf delta to
- *                     calculate CPU busy.
+ * @aperf_mperf_shift: APERF vs MPERF counting frequency difference
  * @prev_aperf:                Last APERF value read from APERF MSR
  * @prev_mperf:                Last MPERF value read from MPERF MSR
  * @prev_tsc:          Last timestamp counter (TSC) value
@@ -275,6 +273,7 @@ static struct cpudata **all_cpu_data;
  * @get_min:           Callback to get minimum P state
  * @get_turbo:         Callback to get turbo P state
  * @get_scaling:       Callback to get frequency scaling factor
+ * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
  * @get_val:           Callback to convert P state to actual MSR write value
  * @get_vid:           Callback to get VID data for Atom platforms
  *
@@ -602,11 +601,12 @@ static const unsigned int epp_values[] = {
        HWP_EPP_POWERSAVE
 };
 
-static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
+static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
 {
        s16 epp;
        int index = -EINVAL;
 
+       *raw_epp = 0;
        epp = intel_pstate_get_epp(cpu_data, 0);
        if (epp < 0)
                return epp;
@@ -614,12 +614,14 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                if (epp == HWP_EPP_PERFORMANCE)
                        return 1;
-               if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
+               if (epp == HWP_EPP_BALANCE_PERFORMANCE)
                        return 2;
-               if (epp <= HWP_EPP_BALANCE_POWERSAVE)
+               if (epp == HWP_EPP_BALANCE_POWERSAVE)
                        return 3;
-               else
+               if (epp == HWP_EPP_POWERSAVE)
                        return 4;
+               *raw_epp = epp;
+               return 0;
        } else if (boot_cpu_has(X86_FEATURE_EPB)) {
                /*
                 * Range:
@@ -638,7 +640,8 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
 }
 
 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
-                                             int pref_index)
+                                             int pref_index, bool use_raw,
+                                             u32 raw_epp)
 {
        int epp = -EINVAL;
        int ret;
@@ -646,29 +649,34 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
        if (!pref_index)
                epp = cpu_data->epp_default;
 
-       mutex_lock(&intel_pstate_limits_lock);
-
        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
-               u64 value;
-
-               ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
-               if (ret)
-                       goto return_pref;
+               /*
+                * Use the cached HWP Request MSR value, because the register
+                * itself may be updated by intel_pstate_hwp_boost_up() or
+                * intel_pstate_hwp_boost_down() at any time.
+                */
+               u64 value = READ_ONCE(cpu_data->hwp_req_cached);
 
                value &= ~GENMASK_ULL(31, 24);
 
-               if (epp == -EINVAL)
+               if (use_raw)
+                       epp = raw_epp;
+               else if (epp == -EINVAL)
                        epp = epp_values[pref_index - 1];
 
                value |= (u64)epp << 24;
+               /*
+                * The only other updater of hwp_req_cached in the active mode,
+                * intel_pstate_hwp_set(), is called under the same lock as this
+                * function, so it cannot run in parallel with the update below.
+                */
+               WRITE_ONCE(cpu_data->hwp_req_cached, value);
                ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
        } else {
                if (epp == -EINVAL)
                        epp = (pref_index - 1) << 2;
                ret = intel_pstate_set_epb(cpu_data->cpu, epp);
        }
-return_pref:
-       mutex_unlock(&intel_pstate_limits_lock);
 
        return ret;
 }
@@ -694,31 +702,54 @@ static ssize_t store_energy_performance_preference(
 {
        struct cpudata *cpu_data = all_cpu_data[policy->cpu];
        char str_preference[21];
-       int ret;
+       bool raw = false;
+       ssize_t ret;
+       u32 epp = 0;
 
        ret = sscanf(buf, "%20s", str_preference);
        if (ret != 1)
                return -EINVAL;
 
        ret = match_string(energy_perf_strings, -1, str_preference);
-       if (ret < 0)
-               return ret;
+       if (ret < 0) {
+               if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
+                       return ret;
 
-       intel_pstate_set_energy_pref_index(cpu_data, ret);
-       return count;
+               ret = kstrtouint(buf, 10, &epp);
+               if (ret)
+                       return ret;
+
+               if (epp > 255)
+                       return -EINVAL;
+
+               raw = true;
+       }
+
+       mutex_lock(&intel_pstate_limits_lock);
+
+       ret = intel_pstate_set_energy_pref_index(cpu_data, ret, raw, epp);
+       if (!ret)
+               ret = count;
+
+       mutex_unlock(&intel_pstate_limits_lock);
+
+       return ret;
 }
 
 static ssize_t show_energy_performance_preference(
                                struct cpufreq_policy *policy, char *buf)
 {
        struct cpudata *cpu_data = all_cpu_data[policy->cpu];
-       int preference;
+       int preference, raw_epp;
 
-       preference = intel_pstate_get_energy_pref_index(cpu_data);
+       preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
        if (preference < 0)
                return preference;
 
-       return  sprintf(buf, "%s\n", energy_perf_strings[preference]);
+       if (raw_epp)
+               return  sprintf(buf, "%d\n", raw_epp);
+       else
+               return  sprintf(buf, "%s\n", energy_perf_strings[preference]);
 }
 
 cpufreq_freq_attr_rw(energy_performance_preference);
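
With the store/show paths above, energy_performance_preference still accepts the named preferences, and on HWP_EPP systems it additionally accepts a raw value in the 0-255 range that is programmed verbatim into bits 31:24 of MSR_HWP_REQUEST (lower values lean toward performance, higher toward power saving). A hypothetical user-space sketch, assuming the standard per-policy sysfs layout:

	#include <stdio.h>

	int main(void)
	{
		/* Write a raw EPP of 128 for the CPUs covered by policy0. */
		FILE *f = fopen("/sys/devices/system/cpu/cpufreq/policy0/"
				"energy_performance_preference", "w");

		if (!f)
			return 1;
		fprintf(f, "128\n");
		fclose(f);
		return 0;
	}
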
@@ -866,10 +897,39 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
        return 0;
 }
 
+#define POWER_CTL_EE_ENABLE    1
+#define POWER_CTL_EE_DISABLE   2
+
+static int power_ctl_ee_state;
+
+static void set_power_ctl_ee_state(bool input)
+{
+       u64 power_ctl;
+
+       mutex_lock(&intel_pstate_driver_lock);
+       rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+       if (input) {
+               power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
+               power_ctl_ee_state = POWER_CTL_EE_ENABLE;
+       } else {
+               power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+               power_ctl_ee_state = POWER_CTL_EE_DISABLE;
+       }
+       wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
+       mutex_unlock(&intel_pstate_driver_lock);
+}
+
 static void intel_pstate_hwp_enable(struct cpudata *cpudata);
 
 static int intel_pstate_resume(struct cpufreq_policy *policy)
 {
+
+       /* Restore the energy-efficiency state only if the user has changed it. */
+       if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
+               set_power_ctl_ee_state(true);
+       else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
+               set_power_ctl_ee_state(false);
+
        if (!hwp_active)
                return 0;
 
@@ -1218,6 +1278,32 @@ static ssize_t store_hwp_dynamic_boost(struct kobject *a,
        return count;
 }
 
+static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
+                                     char *buf)
+{
+       u64 power_ctl;
+       int enable;
+
+       rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+       enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
+       return sprintf(buf, "%d\n", !enable);
+}
+
+static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
+                                      const char *buf, size_t count)
+{
+       bool input;
+       int ret;
+
+       ret = kstrtobool(buf, &input);
+       if (ret)
+               return ret;
+
+       set_power_ctl_ee_state(input);
+
+       return count;
+}
+
 show_one(max_perf_pct, max_perf_pct);
 show_one(min_perf_pct, min_perf_pct);
 
@@ -1228,6 +1314,7 @@ define_one_global_rw(min_perf_pct);
 define_one_global_ro(turbo_pct);
 define_one_global_ro(num_pstates);
 define_one_global_rw(hwp_dynamic_boost);
+define_one_global_rw(energy_efficiency);
 
 static struct attribute *intel_pstate_attributes[] = {
        &status.attr,
@@ -1241,6 +1328,8 @@ static const struct attribute_group intel_pstate_attr_group = {
        .attrs = intel_pstate_attributes,
 };
 
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
+
 static void __init intel_pstate_sysfs_expose_params(void)
 {
        struct kobject *intel_pstate_kobject;
@@ -1273,6 +1362,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
                                       &hwp_dynamic_boost.attr);
                WARN_ON(rc);
        }
+
+       if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
+               rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
+               WARN_ON(rc);
+       }
 }
 /************************** sysfs end ************************/
 
@@ -1288,25 +1382,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
                cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 }
 
-#define MSR_IA32_POWER_CTL_BIT_EE      19
-
-/* Disable energy efficiency optimization */
-static void intel_pstate_disable_ee(int cpu)
-{
-       u64 power_ctl;
-       int ret;
-
-       ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
-       if (ret)
-               return;
-
-       if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
-               pr_info("Disabling energy efficiency optimization\n");
-               power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
-               wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
-       }
-}
-
 static int atom_get_min_pstate(void)
 {
        u64 value;
@@ -1982,10 +2057,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        if (hwp_active) {
                const struct x86_cpu_id *id;
 
-               id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
-               if (id)
-                       intel_pstate_disable_ee(cpunum);
-
                intel_pstate_hwp_enable(cpu);
 
                id = x86_match_cpu(intel_pstate_hwp_boost_ids);
@@ -2754,7 +2825,12 @@ static int __init intel_pstate_init(void)
        id = x86_match_cpu(hwp_support_ids);
        if (id) {
                copy_cpu_funcs(&core_funcs);
-               if (!no_hwp) {
+               /*
+                * Avoid enabling HWP for processors without EPP support,
+                * because that means an incomplete HWP implementation, which is
+                * a corner case and generally problematic to support.
+                */
+               if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                        hwp_active++;
                        hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
@@ -2808,8 +2884,17 @@ hwp_cpu_matched:
        if (rc)
                return rc;
 
-       if (hwp_active)
+       if (hwp_active) {
+               const struct x86_cpu_id *id;
+
+               id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+               if (id) {
+                       set_power_ctl_ee_state(false);
+                       pr_info("Disabling energy efficiency optimization\n");
+               }
+
                pr_info("HWP enabled\n");
+       }
 
        return 0;
 }
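
Note the inverted semantics of the new energy_efficiency attribute: the POWER_CTL bit (MSR_IA32_POWER_CTL_BIT_EE, previously defined locally above and, per the file list, now in msr-index.h) is a disable bit, so the file reads 1 while the optimization is active. In shorthand, the show path computes:

	/* Illustration only, assuming the bit-19 EE definition removed above: */
	static inline bool energy_efficiency_enabled(u64 power_ctl)
	{
		return !(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
	}
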
index 0c98dd08273d0ab4405adfde78644462887485db..7d1212c9b7c88af34ead03f1efd83a28f2ae85bb 100644 (file)
@@ -448,7 +448,7 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
        policy->driver_data = info;
        policy->clk = info->cpu_clk;
 
-       dev_pm_opp_of_register_em(policy->cpus);
+       dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus);
 
        return 0;
 }
index 8d14b42a8c6f28ce5489f5d00bcba3169783eb12..3694bb030df3d902b53ae98210a6a82c7a317b55 100644 (file)
@@ -131,7 +131,7 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
 
        /* FIXME: what's the actual transition time? */
        cpufreq_generic_init(policy, freq_table, 300 * 1000);
-       dev_pm_opp_of_register_em(policy->cpus);
+       dev_pm_opp_of_register_em(mpu_dev, policy->cpus);
 
        return 0;
 }
index c66f566a854cb6775242df8a2078ee217269e260..815645170c4deca8239247ddb93ab5befb1b7a6e 100644 (file)
@@ -22,6 +22,8 @@
 #include <asm/time.h>
 #include <asm/smp.h>
 
+#include <platforms/pasemi/pasemi.h>
+
 #define SDCASR_REG             0x0100
 #define SDCASR_REG_STRIDE      0x1000
 #define SDCPWR_CFGA0_REG       0x0100
index 5789fe7a94bd3fa3649c308d9cd11e632fae16ad..9f3fc7a073d075ba50ae3e3f72be03070db59316 100644 (file)
@@ -616,7 +616,7 @@ static void __exit pcc_cpufreq_exit(void)
        free_percpu(pcc_cpu_info);
 }
 
-static const struct acpi_device_id processor_device_ids[] = {
+static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
        {ACPI_PROCESSOR_OBJECT_HID, },
        {ACPI_PROCESSOR_DEVICE_HID, },
        {},
index 3984959eed1d514bcd73febe29cb95d20c8c001d..0acc9e241cd7d6e2688973fc1f13f13d7198c964 100644 (file)
@@ -86,7 +86,7 @@ static u32 convert_fid_to_vco_fid(u32 fid)
  */
 static int pending_bit_stuck(void)
 {
-       u32 lo, hi;
+       u32 lo, hi __always_unused;
 
        rdmsr(MSR_FIDVID_STATUS, lo, hi);
        return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
@@ -282,7 +282,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data,
 {
        u32 rvosteps = data->rvo;
        u32 savefid = data->currfid;
-       u32 maxvid, lo, rvomult = 1;
+       u32 maxvid, lo __always_unused, rvomult = 1;
 
        pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
                smp_processor_id(),
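
Both the pcc-cpufreq and powernow-k8 hunks silence "set but not used" warnings with attribute annotations rather than by deleting the variables, since rdmsr() still needs somewhere to store both output halves. For reference, the two annotations come from include/linux/compiler_attributes.h and reduce to the same GCC attribute:

	/* Paraphrased from include/linux/compiler_attributes.h: */
	#define __always_unused	__attribute__((__unused__))
	#define __maybe_unused	__attribute__((__unused__))
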
index 8646eb197cd967968580ba2253e4e60c0a59cfb5..a9af15e994ccf057074f984efd8b333258db73bf 100644 (file)
  *                             highest_lpstate_idx
  * @last_sampled_time:         Time from boot in ms when global pstates were
  *                             last set
- * @last_lpstate_idx,          Last set value of local pstate and global
- * last_gpstate_idx            pstate in terms of cpufreq table index
+ * @last_lpstate_idx:          Last set value of the local pstate, as a
+ *                             cpufreq table index
+ * @last_gpstate_idx:          Last set value of the global pstate, as a
+ *                             cpufreq table index
  * @timer:                     Is used for ramping down if cpu goes idle for
  *                             a long time with global pstate held high
  * @gpstate_lock:              A spinlock to maintain synchronization between
  *                             routines called by the timer handler and
 *                             governor's target_index calls
+ * @policy:                    Associated CPUFreq policy
  */
 struct global_pstate_info {
        int highest_lpstate_idx;
@@ -85,7 +86,7 @@ struct global_pstate_info {
 
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
 
-DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
+static DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
 /**
  * struct pstate_idx_revmap_data: Entry in the hashmap pstate_revmap
  *                               indexed by a function of pstate id.
@@ -170,7 +171,7 @@ static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
 
 /* Use following functions for conversions between pstate_id and index */
 
-/**
+/*
  * idx_to_pstate : Returns the pstate id corresponding to the
  *                frequency in the cpufreq frequency table
  *                powernv_freqs indexed by @i.
@@ -188,7 +189,7 @@ static inline u8 idx_to_pstate(unsigned int i)
        return powernv_freqs[i].driver_data;
 }
 
-/**
+/*
 * pstate_to_idx : Returns the index in the cpufreq frequency table
  *                powernv_freqs for the frequency whose corresponding
  *                pstate id is @pstate.
@@ -380,7 +381,7 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
                powernv_freqs[powernv_pstate_info.nominal].frequency);
 }
 
-struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
+static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
        __ATTR_RO(cpuinfo_nominal_freq);
 
 #define SCALING_BOOST_FREQS_ATTR_INDEX         2
@@ -660,13 +661,13 @@ static inline void  queue_gpstate_timer(struct global_pstate_info *gpstates)
 /**
  * gpstate_timer_handler
  *
- * @data: pointer to cpufreq_policy on which timer was queued
+ * @t: Timer context used to fetch global pstate info struct
  *
 * This handler brings the global pstate down closer to the local pstate,
 * according to a quadratic equation. It queues a new timer if the two are
 * still not equal.
  */
-void gpstate_timer_handler(struct timer_list *t)
+static void gpstate_timer_handler(struct timer_list *t)
 {
        struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
        struct cpufreq_policy *policy = gpstates->policy;
@@ -899,7 +900,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
        .notifier_call = powernv_cpufreq_reboot_notifier,
 };
 
-void powernv_cpufreq_work_fn(struct work_struct *work)
+static void powernv_cpufreq_work_fn(struct work_struct *work)
 {
        struct chip *chip = container_of(work, struct chip, throttle);
        struct cpufreq_policy *policy;
index fc92a8842e252abe0c8b8fb9d92438ca75086631..0a04b6f03b9a5dfef2a65beb3fc1eb06bde209e5 100644 (file)
@@ -238,7 +238,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
                goto error;
        }
 
-       dev_pm_opp_of_register_em(policy->cpus);
+       dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
 
        policy->fast_switch_possible = true;
 
index 61623e2ff14955ea4e5d4f966066ebe5b61eeef6..11ee24e06d129108e2eb3d01ead7d4094c68c450 100644 (file)
@@ -103,17 +103,12 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 }
 
 static int __maybe_unused
-scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu)
+scmi_get_cpu_power(unsigned long *power, unsigned long *KHz,
+                  struct device *cpu_dev)
 {
-       struct device *cpu_dev = get_cpu_device(cpu);
        unsigned long Hz;
        int ret, domain;
 
-       if (!cpu_dev) {
-               pr_err("failed to get cpu%d device\n", cpu);
-               return -ENODEV;
-       }
-
        domain = handle->perf_ops->device_domain_id(cpu_dev);
        if (domain < 0)
                return domain;
@@ -200,7 +195,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 
        policy->fast_switch_possible = true;
 
-       em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+       em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus);
 
        return 0;
 
index 20d1f85d5f5a4e6e8312547082e49824c0967c79..b0f5388b88544f6b71ac792e9367bf7667fa0108 100644 (file)
@@ -167,7 +167,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
 
        policy->fast_switch_possible = false;
 
-       dev_pm_opp_of_register_em(policy->cpus);
+       dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
 
        return 0;
 
index 83c85d3d67e355495695b278b37586869e466f0c..4e8b1dee7c9aa20d7eca6cbda6876b998cb2871e 100644 (file)
@@ -450,7 +450,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
        policy->freq_table = freq_table[cur_cluster];
        policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
 
-       dev_pm_opp_of_register_em(policy->cpus);
+       dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
 
        if (is_bL_switching_enabled())
                per_cpu(cpu_last_req_freq, policy->cpu) =
index 51a7e89085c0bdef5fa23451e55143847a786467..0844fadc4be85b6f1ca5655d0b87b94e9dad33e6 100644 (file)
@@ -23,6 +23,16 @@ config ARM_PSCI_CPUIDLE
          It provides an idle driver that is capable of detecting and
          managing idle states through the PSCI firmware interface.
 
+config ARM_PSCI_CPUIDLE_DOMAIN
+       bool "PSCI CPU idle Domain"
+       depends on ARM_PSCI_CPUIDLE
+       depends on PM_GENERIC_DOMAINS_OF
+       default y
+       help
+         Select this to enable the PSCI based CPUidle driver to use PM domains,
+         which is needed to support the hierarchical DT based layout of the
+         idle states.
+
 config ARM_BIG_LITTLE_CPUIDLE
        bool "Support for ARM big.LITTLE processors"
        depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST
index f07800cbb43f04ab6b2d007b69ed6b9325154e0e..26bbc5e7412366aef699a453d90d5810f191b6b4 100644 (file)
@@ -21,9 +21,8 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
 obj-$(CONFIG_ARM_AT91_CPUIDLE)          += cpuidle-at91.o
 obj-$(CONFIG_ARM_EXYNOS_CPUIDLE)        += cpuidle-exynos.o
 obj-$(CONFIG_ARM_CPUIDLE)              += cpuidle-arm.o
-obj-$(CONFIG_ARM_PSCI_CPUIDLE)         += cpuidle_psci.o
-cpuidle_psci-y                         := cpuidle-psci.o
-cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE)         += cpuidle-psci.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE_DOMAIN)  += cpuidle-psci-domain.o
 obj-$(CONFIG_ARM_TEGRA_CPUIDLE)                += cpuidle-tegra.o
 obj-$(CONFIG_ARM_QCOM_SPM_CPUIDLE)     += cpuidle-qcom-spm.o
 
index 423f03bbeb74a88bdbe72941528aad2c7c3b8f8c..b6e9649ab0da3f6c18c2a45e186d5b1c71e8f7b5 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/cpu.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
+#include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/psci.h>
@@ -26,7 +27,7 @@ struct psci_pd_provider {
 };
 
 static LIST_HEAD(psci_pd_providers);
-static bool osi_mode_enabled __initdata;
+static bool psci_pd_allow_domain_state;
 
 static int psci_pd_power_off(struct generic_pm_domain *pd)
 {
@@ -36,6 +37,9 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
        if (!state->data)
                return 0;
 
+       if (!psci_pd_allow_domain_state)
+               return -EBUSY;
+
        /* OSI mode is enabled, set the corresponding domain state. */
        pd_state = state->data;
        psci_set_domain_state(*pd_state);
@@ -43,8 +47,8 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
        return 0;
 }
 
-static int __init psci_pd_parse_state_nodes(struct genpd_power_state *states,
-                                       int state_count)
+static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
+                                    int state_count)
 {
        int i, ret;
        u32 psci_state, *psci_state_buf;
@@ -73,7 +77,7 @@ free_state:
        return ret;
 }
 
-static int __init psci_pd_parse_states(struct device_node *np,
+static int psci_pd_parse_states(struct device_node *np,
                        struct genpd_power_state **states, int *state_count)
 {
        int ret;
@@ -101,7 +105,7 @@ static void psci_pd_free_states(struct genpd_power_state *states,
        kfree(states);
 }
 
-static int __init psci_pd_init(struct device_node *np)
+static int psci_pd_init(struct device_node *np)
 {
        struct generic_pm_domain *pd;
        struct psci_pd_provider *pd_provider;
@@ -168,7 +172,7 @@ out:
        return ret;
 }
 
-static void __init psci_pd_remove(void)
+static void psci_pd_remove(void)
 {
        struct psci_pd_provider *pd_provider, *it;
        struct generic_pm_domain *genpd;
@@ -186,7 +190,7 @@ static void __init psci_pd_remove(void)
        }
 }
 
-static int __init psci_pd_init_topology(struct device_node *np, bool add)
+static int psci_pd_init_topology(struct device_node *np, bool add)
 {
        struct device_node *node;
        struct of_phandle_args child, parent;
@@ -212,24 +216,33 @@ static int __init psci_pd_init_topology(struct device_node *np, bool add)
        return 0;
 }
 
-static int __init psci_pd_add_topology(struct device_node *np)
+static int psci_pd_add_topology(struct device_node *np)
 {
        return psci_pd_init_topology(np, true);
 }
 
-static void __init psci_pd_remove_topology(struct device_node *np)
+static void psci_pd_remove_topology(struct device_node *np)
 {
        psci_pd_init_topology(np, false);
 }
 
-static const struct of_device_id psci_of_match[] __initconst = {
+static void psci_cpuidle_domain_sync_state(struct device *dev)
+{
+       /*
+        * All devices have now been attached/probed to the PM domain topology,
+        * hence it's fine to allow domain states to be picked.
+        */
+       psci_pd_allow_domain_state = true;
+}
+
+static const struct of_device_id psci_of_match[] = {
        { .compatible = "arm,psci-1.0" },
        {}
 };
 
-static int __init psci_idle_init_domains(void)
+static int psci_cpuidle_domain_probe(struct platform_device *pdev)
 {
-       struct device_node *np = of_find_matching_node(NULL, psci_of_match);
+       struct device_node *np = pdev->dev.of_node;
        struct device_node *node;
        int ret = 0, pd_count = 0;
 
@@ -238,7 +251,7 @@ static int __init psci_idle_init_domains(void)
 
        /* Currently limit the hierarchical topology to be used in OSI mode. */
        if (!psci_has_osi_support())
-               goto out;
+               return 0;
 
        /*
         * Parse child nodes for the "#power-domain-cells" property and
@@ -257,7 +270,7 @@ static int __init psci_idle_init_domains(void)
 
        /* Bail out if not using the hierarchical CPU topology. */
        if (!pd_count)
-               goto out;
+               return 0;
 
        /* Link genpd masters/subdomains to model the CPU topology. */
        ret = psci_pd_add_topology(np);
@@ -272,10 +285,8 @@ static int __init psci_idle_init_domains(void)
                goto remove_pd;
        }
 
-       osi_mode_enabled = true;
-       of_node_put(np);
        pr_info("Initialized CPU PM domain topology\n");
-       return pd_count;
+       return 0;
 
 put_node:
        of_node_put(node);
@@ -283,19 +294,28 @@ remove_pd:
        if (pd_count)
                psci_pd_remove();
        pr_err("failed to create CPU PM domains ret=%d\n", ret);
-out:
-       of_node_put(np);
        return ret;
 }
+
+static struct platform_driver psci_cpuidle_domain_driver = {
+       .probe  = psci_cpuidle_domain_probe,
+       .driver = {
+               .name = "psci-cpuidle-domain",
+               .of_match_table = psci_of_match,
+               .sync_state = psci_cpuidle_domain_sync_state,
+       },
+};
+
+static int __init psci_idle_init_domains(void)
+{
+       return platform_driver_register(&psci_cpuidle_domain_driver);
+}
 subsys_initcall(psci_idle_init_domains);
 
-struct device __init *psci_dt_attach_cpu(int cpu)
+struct device *psci_dt_attach_cpu(int cpu)
 {
        struct device *dev;
 
-       if (!osi_mode_enabled)
-               return NULL;
-
        dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
        if (IS_ERR_OR_NULL(dev))
                return dev;
@@ -306,3 +326,11 @@ struct device __init *psci_dt_attach_cpu(int cpu)
 
        return dev;
 }
+
+void psci_dt_detach_cpu(struct device *dev)
+{
+       if (IS_ERR_OR_NULL(dev))
+               return;
+
+       dev_pm_domain_detach(dev, false);
+}
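
Converting the domain setup into a platform driver buys the .sync_state hook: the driver core calls it once every consumer described in DT has probed, and only then does psci_pd_power_off() stop returning -EBUSY (which genpd treats as "keep the domain powered"). A minimal sketch of the same pattern for a hypothetical genpd provider:

	#include <linux/platform_device.h>

	static bool example_domain_state_allowed;

	static int example_probe(struct platform_device *pdev)
	{
		return 0;	/* register the PM domains here */
	}

	static void example_sync_state(struct device *dev)
	{
		/* All DT consumers have probed; deeper states are now safe. */
		example_domain_state_allowed = true;
	}

	static struct platform_driver example_pd_driver = {
		.probe	= example_probe,
		.driver = {
			.name		= "example-pd-provider",
			.sync_state	= example_sync_state,
		},
	};
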
index 3806f911b61c05650517c199cca8ebf01c6be0a6..74463841805f2ecb8784699f08ae1a62475ceffb 100644 (file)
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/psci.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 
 #include <asm/cpuidle.h>
 
@@ -33,7 +35,7 @@ struct psci_cpuidle_data {
 
 static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
 static DEFINE_PER_CPU(u32, domain_state);
-static bool psci_cpuidle_use_cpuhp __initdata;
+static bool psci_cpuidle_use_cpuhp;
 
 void psci_set_domain_state(u32 state)
 {
@@ -104,7 +106,7 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
        return 0;
 }
 
-static void __init psci_idle_init_cpuhp(void)
+static void psci_idle_init_cpuhp(void)
 {
        int err;
 
@@ -127,30 +129,13 @@ static int psci_enter_idle_state(struct cpuidle_device *dev,
        return psci_enter_state(idx, state[idx]);
 }
 
-static struct cpuidle_driver psci_idle_driver __initdata = {
-       .name = "psci_idle",
-       .owner = THIS_MODULE,
-       /*
-        * PSCI idle states relies on architectural WFI to
-        * be represented as state index 0.
-        */
-       .states[0] = {
-               .enter                  = psci_enter_idle_state,
-               .exit_latency           = 1,
-               .target_residency       = 1,
-               .power_usage            = UINT_MAX,
-               .name                   = "WFI",
-               .desc                   = "ARM WFI",
-       }
-};
-
-static const struct of_device_id psci_idle_state_match[] __initconst = {
+static const struct of_device_id psci_idle_state_match[] = {
        { .compatible = "arm,idle-state",
          .data = psci_enter_idle_state },
        { },
 };
 
-int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+int psci_dt_parse_state_node(struct device_node *np, u32 *state)
 {
        int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
 
@@ -167,9 +152,9 @@ int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
        return 0;
 }
 
-static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
-                                           struct psci_cpuidle_data *data,
-                                           unsigned int state_count, int cpu)
+static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
+                                    struct psci_cpuidle_data *data,
+                                    unsigned int state_count, int cpu)
 {
        /* Currently limit the hierarchical topology to be used in OSI mode. */
        if (!psci_has_osi_support())
@@ -190,9 +175,9 @@ static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
        return 0;
 }
 
-static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
-                                       struct device_node *cpu_node,
-                                       unsigned int state_count, int cpu)
+static int psci_dt_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
+                                struct device_node *cpu_node,
+                                unsigned int state_count, int cpu)
 {
        int i, ret = 0;
        u32 *psci_states;
@@ -200,7 +185,8 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
        struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
 
        state_count++; /* Add WFI state too */
-       psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL);
+       psci_states = devm_kcalloc(dev, state_count, sizeof(*psci_states),
+                                  GFP_KERNEL);
        if (!psci_states)
                return -ENOMEM;
 
@@ -213,32 +199,26 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
                of_node_put(state_node);
 
                if (ret)
-                       goto free_mem;
+                       return ret;
 
                pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
        }
 
-       if (i != state_count) {
-               ret = -ENODEV;
-               goto free_mem;
-       }
+       if (i != state_count)
+               return -ENODEV;
 
        /* Initialize optional data, used for the hierarchical topology. */
        ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
        if (ret < 0)
-               goto free_mem;
+               return ret;
 
        /* Idle states parsed correctly, store them in the per-cpu struct. */
        data->psci_states = psci_states;
        return 0;
-
-free_mem:
-       kfree(psci_states);
-       return ret;
 }
 
-static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
-                                    unsigned int cpu, unsigned int state_count)
+static int psci_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
+                             unsigned int cpu, unsigned int state_count)
 {
        struct device_node *cpu_node;
        int ret;
@@ -254,14 +234,22 @@ static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
        if (!cpu_node)
                return -ENODEV;
 
-       ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu);
+       ret = psci_dt_cpu_init_idle(dev, drv, cpu_node, state_count, cpu);
 
        of_node_put(cpu_node);
 
        return ret;
 }
 
-static int __init psci_idle_init_cpu(int cpu)
+static void psci_cpu_deinit_idle(int cpu)
+{
+       struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
+
+       psci_dt_detach_cpu(data->dev);
+       psci_cpuidle_use_cpuhp = false;
+}
+
+static int psci_idle_init_cpu(struct device *dev, int cpu)
 {
        struct cpuidle_driver *drv;
        struct device_node *cpu_node;
@@ -284,17 +272,26 @@ static int __init psci_idle_init_cpu(int cpu)
        if (ret)
                return ret;
 
-       drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
+       drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;
 
+       drv->name = "psci_idle";
+       drv->owner = THIS_MODULE;
        drv->cpumask = (struct cpumask *)cpumask_of(cpu);
 
        /*
-        * Initialize idle states data, starting at index 1, since
-        * by default idle state 0 is the quiescent state reached
-        * by the cpu by executing the wfi instruction.
-        *
+        * PSCI idle states rely on architectural WFI being represented as
+        * state index 0.
+        */
+       drv->states[0].enter = psci_enter_idle_state;
+       drv->states[0].exit_latency = 1;
+       drv->states[0].target_residency = 1;
+       drv->states[0].power_usage = UINT_MAX;
+       strcpy(drv->states[0].name, "WFI");
+       strcpy(drv->states[0].desc, "ARM WFI");
+
+       /*
         * If no DT idle states are detected (ret == 0) let the driver
         * initialization fail accordingly since there is no reason to
         * initialize the idle driver if only wfi is supported, the
         * default architectural back-end already executes wfi
         * on idle entry.
         * on idle entry.
         */
        ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
-       if (ret <= 0) {
-               ret = ret ? : -ENODEV;
-               goto out_kfree_drv;
-       }
+       if (ret <= 0)
+               return ret ? : -ENODEV;
 
        /*
         * Initialize PSCI idle states.
         */
-       ret = psci_cpu_init_idle(drv, cpu, ret);
+       ret = psci_cpu_init_idle(dev, drv, cpu, ret);
        if (ret) {
                pr_err("CPU %d failed to PSCI idle\n", cpu);
-               goto out_kfree_drv;
+               return ret;
        }
 
        ret = cpuidle_register(drv, NULL);
        if (ret)
-               goto out_kfree_drv;
+               goto deinit;
 
        cpuidle_cooling_register(drv);
 
        return 0;
-
-out_kfree_drv:
-       kfree(drv);
+deinit:
+       psci_cpu_deinit_idle(cpu);
        return ret;
 }
 
 /*
- * psci_idle_init - Initializes PSCI cpuidle driver
+ * psci_cpuidle_probe - Initializes the PSCI cpuidle driver
  *
 * Initializes the PSCI cpuidle driver for all CPUs. If any CPU fails
 * to register its cpuidle driver, roll back and cancel the registration
 * of all CPUs.
  */
-static int __init psci_idle_init(void)
+static int psci_cpuidle_probe(struct platform_device *pdev)
 {
        int cpu, ret;
        struct cpuidle_driver *drv;
        struct cpuidle_device *dev;
 
        for_each_possible_cpu(cpu) {
-               ret = psci_idle_init_cpu(cpu);
+               ret = psci_idle_init_cpu(&pdev->dev, cpu);
                if (ret)
                        goto out_fail;
        }
@@ -356,9 +350,34 @@ out_fail:
                dev = per_cpu(cpuidle_devices, cpu);
                drv = cpuidle_get_cpu_driver(dev);
                cpuidle_unregister(drv);
-               kfree(drv);
+               psci_cpu_deinit_idle(cpu);
        }
 
        return ret;
 }
+
+static struct platform_driver psci_cpuidle_driver = {
+       .probe = psci_cpuidle_probe,
+       .driver = {
+               .name = "psci-cpuidle",
+       },
+};
+
+static int __init psci_idle_init(void)
+{
+       struct platform_device *pdev;
+       int ret;
+
+       ret = platform_driver_register(&psci_cpuidle_driver);
+       if (ret)
+               return ret;
+
+       pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0);
+       if (IS_ERR(pdev)) {
+               platform_driver_unregister(&psci_cpuidle_driver);
+               return PTR_ERR(pdev);
+       }
+
+       return 0;
+}
 device_initcall(psci_idle_init);
index 7299a04dd4672699c2ef3fed2f76c5f7cfe4210f..d8e925e84c27af3d400f6f972d4ffe24dcf0c54b 100644 (file)
@@ -3,15 +3,18 @@
 #ifndef __CPUIDLE_PSCI_H
 #define __CPUIDLE_PSCI_H
 
+struct device;
 struct device_node;
 
 void psci_set_domain_state(u32 state);
-int __init psci_dt_parse_state_node(struct device_node *np, u32 *state);
+int psci_dt_parse_state_node(struct device_node *np, u32 *state);
 
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-struct device __init *psci_dt_attach_cpu(int cpu);
+#ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN
+struct device *psci_dt_attach_cpu(int cpu);
+void psci_dt_detach_cpu(struct device *dev);
 #else
-static inline struct device __init *psci_dt_attach_cpu(int cpu) { return NULL; }
+static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; }
+static inline void psci_dt_detach_cpu(struct device *dev) { }
 #endif
 
 #endif /* __CPUIDLE_PSCI_H */
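
With the detach stub in place, callers need no #ifdef of their own: when ARM_PSCI_CPUIDLE_DOMAIN is disabled, attach yields NULL and detach is a no-op, and the real psci_dt_detach_cpu() tolerates NULL/ERR_PTR input anyway (see its IS_ERR_OR_NULL check above). A simplified, hypothetical caller illustrating the contract:

	/* Simplified from the cpuidle-psci.c call sites; illustration only. */
	static int example_attach(struct psci_cpuidle_data *data, int cpu)
	{
		data->dev = psci_dt_attach_cpu(cpu);	/* NULL when compiled out */
		if (IS_ERR(data->dev))
			return PTR_ERR(data->dev);
		return 0;
	}

	static void example_detach(struct psci_cpuidle_data *data)
	{
		psci_dt_detach_cpu(data->dev);	/* safe for NULL and ERR_PTR */
	}
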
index 150045849d78206850eaf1ada23d4001c76750c2..a12fb141875a797c0cf1defc46da46f9c7fbce6f 100644 (file)
@@ -253,11 +253,13 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev,
        return err ? -1 : index;
 }
 
-static void tegra114_enter_s2idle(struct cpuidle_device *dev,
-                                 struct cpuidle_driver *drv,
-                                 int index)
+static int tegra114_enter_s2idle(struct cpuidle_device *dev,
+                                struct cpuidle_driver *drv,
+                                int index)
 {
        tegra_cpuidle_enter(dev, drv, index);
+
+       return 0;
 }
 
 /*
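
The s2idle callback gains a return value here because the enter_s2idle() method of struct cpuidle_state changes from void to int in this cycle (include/linux/cpuidle.h is in the file list), matching the regular enter() convention of returning the entered state index or a negative error. The implied member type is approximately:

	/* Sketch of the updated struct cpuidle_state member: */
	int (*enter_s2idle)(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv,
			    int index);
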
index 8c31b0f2e28f7fdf1faa59a95d048d4567a4e8d6..56efbeb7851e7a1ee5816f3345896648701f733d 100644 (file)
@@ -293,7 +293,7 @@ static void devfreq_event_release_edev(struct device *dev)
 /**
  * devfreq_event_add_edev() - Add new devfreq-event device.
  * @dev                : the device owning the devfreq-event device being created
- * @desc       : the devfreq-event device's decriptor which include essential
+ * @desc       : the devfreq-event device's descriptor which includes essential
  *               data for devfreq-event device.
  *
 * Note that this function adds a new devfreq-event device to devfreq-event class
@@ -385,7 +385,7 @@ static void devm_devfreq_event_release(struct device *dev, void *res)
 /**
  * devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev()
  * @dev                : the device owning the devfreq-event device being created
- * @desc       : the devfreq-event device's decriptor which include essential
+ * @desc       : the devfreq-event device's descriptor which includes essential
  *               data for devfreq-event device.
  *
 * Note that this function automatically manages the memory of the devfreq-event
index 52b9c3e141f37d882b9913e75db90b7f776f5096..561d91b2d3bf577f3fb6fd34ced8fc7fc8e2ef7c 100644 (file)
@@ -49,6 +49,11 @@ static LIST_HEAD(devfreq_governor_list);
 static LIST_HEAD(devfreq_list);
 static DEFINE_MUTEX(devfreq_list_lock);
 
+static const char timer_name[][DEVFREQ_NAME_LEN] = {
+       [DEVFREQ_TIMER_DEFERRABLE] = { "deferrable" },
+       [DEVFREQ_TIMER_DELAYED] = { "delayed" },
+};
+
 /**
  * find_device_devfreq() - find devfreq struct using device pointer
  * @dev:       device pointer used to lookup device devfreq.
@@ -454,7 +459,17 @@ void devfreq_monitor_start(struct devfreq *devfreq)
        if (devfreq->governor->interrupt_driven)
                return;
 
-       INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+       switch (devfreq->profile->timer) {
+       case DEVFREQ_TIMER_DEFERRABLE:
+               INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+               break;
+       case DEVFREQ_TIMER_DELAYED:
+               INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
+               break;
+       default:
+               return;
+       }
+
        if (devfreq->profile->polling_ms)
                queue_delayed_work(devfreq_wq, &devfreq->work,
                        msecs_to_jiffies(devfreq->profile->polling_ms));
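
A deferrable work item does not wake an idle CPU, so on idle-heavy workloads the load counters can go stale; the new delayed timer trades a little power for monitoring that keeps firing. A driver opts in through its profile, and anything outside the two known values makes devfreq_add_device() below reject the profile. A hypothetical snippet (the my_* names are placeholders):

	static struct devfreq_dev_profile my_profile = {
		.polling_ms	= 100,
		.timer		= DEVFREQ_TIMER_DELAYED, /* zeroed profiles get DEVFREQ_TIMER_DEFERRABLE */
		.target		= my_target,
		.get_dev_status	= my_get_dev_status,
	};

	/* In probe: */
	df = devm_devfreq_add_device(dev, &my_profile,
				     DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);

The same choice is exposed at runtime through the new timer sysfs node added further down; its store path stops and restarts the governor so the switch takes effect on the next devfreq_monitor_start().
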
@@ -771,6 +786,11 @@ struct devfreq *devfreq_add_device(struct device *dev,
        devfreq->data = data;
        devfreq->nb.notifier_call = devfreq_notifier_call;
 
+       if (devfreq->profile->timer < 0
+               || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
+               mutex_unlock(&devfreq->lock);
+               err = -EINVAL;
+               goto err_out;
+       }
+
        if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
                mutex_unlock(&devfreq->lock);
                err = set_freq_table(devfreq);
@@ -1260,18 +1280,20 @@ EXPORT_SYMBOL(devfreq_remove_governor);
 static ssize_t name_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
-       struct devfreq *devfreq = to_devfreq(dev);
-       return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
+       struct devfreq *df = to_devfreq(dev);
+       return sprintf(buf, "%s\n", dev_name(df->dev.parent));
 }
 static DEVICE_ATTR_RO(name);
 
 static ssize_t governor_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
 {
-       if (!to_devfreq(dev)->governor)
+       struct devfreq *df = to_devfreq(dev);
+
+       if (!df->governor)
                return -EINVAL;
 
-       return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
+       return sprintf(buf, "%s\n", df->governor->name);
 }
 
 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
@@ -1282,6 +1304,9 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
        char str_governor[DEVFREQ_NAME_LEN + 1];
        const struct devfreq_governor *governor, *prev_governor;
 
+       if (!df->governor)
+               return -EINVAL;
+
        ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
        if (ret != 1)
                return -EINVAL;
@@ -1295,20 +1320,18 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
        if (df->governor == governor) {
                ret = 0;
                goto out;
-       } else if ((df->governor && df->governor->immutable) ||
-                                       governor->immutable) {
+       } else if (df->governor->immutable || governor->immutable) {
                ret = -EINVAL;
                goto out;
        }
 
-       if (df->governor) {
-               ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
-               if (ret) {
-                       dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
-                                __func__, df->governor->name, ret);
-                       goto out;
-               }
+       ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+       if (ret) {
+               dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+                        __func__, df->governor->name, ret);
+               goto out;
        }
+
        prev_governor = df->governor;
        df->governor = governor;
        strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
@@ -1343,13 +1366,16 @@ static ssize_t available_governors_show(struct device *d,
        struct devfreq *df = to_devfreq(d);
        ssize_t count = 0;
 
+       if (!df->governor)
+               return -EINVAL;
+
        mutex_lock(&devfreq_list_lock);
 
        /*
         * The devfreq with immutable governor (e.g., passive) shows
         * only own governor.
         */
-       if (df->governor && df->governor->immutable) {
+       if (df->governor->immutable) {
                count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
                                  "%s ", df->governor_name);
        /*
@@ -1383,27 +1409,37 @@ static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
        unsigned long freq;
-       struct devfreq *devfreq = to_devfreq(dev);
+       struct devfreq *df = to_devfreq(dev);
 
-       if (devfreq->profile->get_cur_freq &&
-               !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+       if (!df->profile)
+               return -EINVAL;
+
+       if (df->profile->get_cur_freq &&
+               !df->profile->get_cur_freq(df->dev.parent, &freq))
                return sprintf(buf, "%lu\n", freq);
 
-       return sprintf(buf, "%lu\n", devfreq->previous_freq);
+       return sprintf(buf, "%lu\n", df->previous_freq);
 }
 static DEVICE_ATTR_RO(cur_freq);
 
 static ssize_t target_freq_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
-       return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
+       struct devfreq *df = to_devfreq(dev);
+
+       return sprintf(buf, "%lu\n", df->previous_freq);
 }
 static DEVICE_ATTR_RO(target_freq);
 
 static ssize_t polling_interval_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
-       return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
+       struct devfreq *df = to_devfreq(dev);
+
+       if (!df->profile)
+               return -EINVAL;
+
+       return sprintf(buf, "%d\n", df->profile->polling_ms);
 }
 
 static ssize_t polling_interval_store(struct device *dev,
@@ -1531,6 +1567,9 @@ static ssize_t available_frequencies_show(struct device *d,
        ssize_t count = 0;
        int i;
 
+       if (!df->profile)
+               return -EINVAL;
+
        mutex_lock(&df->lock);
 
        for (i = 0; i < df->profile->max_state; i++)
@@ -1551,49 +1590,53 @@ static DEVICE_ATTR_RO(available_frequencies);
 static ssize_t trans_stat_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
 {
-       struct devfreq *devfreq = to_devfreq(dev);
+       struct devfreq *df = to_devfreq(dev);
        ssize_t len;
        int i, j;
-       unsigned int max_state = devfreq->profile->max_state;
+       unsigned int max_state;
+
+       if (!df->profile)
+               return -EINVAL;
+       max_state = df->profile->max_state;
 
        if (max_state == 0)
                return sprintf(buf, "Not Supported.\n");
 
-       mutex_lock(&devfreq->lock);
-       if (!devfreq->stop_polling &&
-                       devfreq_update_status(devfreq, devfreq->previous_freq)) {
-               mutex_unlock(&devfreq->lock);
+       mutex_lock(&df->lock);
+       if (!df->stop_polling &&
+                       devfreq_update_status(df, df->previous_freq)) {
+               mutex_unlock(&df->lock);
                return 0;
        }
-       mutex_unlock(&devfreq->lock);
+       mutex_unlock(&df->lock);
 
        len = sprintf(buf, "     From  :   To\n");
        len += sprintf(buf + len, "           :");
        for (i = 0; i < max_state; i++)
                len += sprintf(buf + len, "%10lu",
-                               devfreq->profile->freq_table[i]);
+                               df->profile->freq_table[i]);
 
        len += sprintf(buf + len, "   time(ms)\n");
 
        for (i = 0; i < max_state; i++) {
-               if (devfreq->profile->freq_table[i]
-                                       == devfreq->previous_freq) {
+               if (df->profile->freq_table[i]
+                                       == df->previous_freq) {
                        len += sprintf(buf + len, "*");
                } else {
                        len += sprintf(buf + len, " ");
                }
                len += sprintf(buf + len, "%10lu:",
-                               devfreq->profile->freq_table[i]);
+                               df->profile->freq_table[i]);
                for (j = 0; j < max_state; j++)
                        len += sprintf(buf + len, "%10u",
-                               devfreq->stats.trans_table[(i * max_state) + j]);
+                               df->stats.trans_table[(i * max_state) + j]);
 
                len += sprintf(buf + len, "%10llu\n", (u64)
-                       jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
+                       jiffies64_to_msecs(df->stats.time_in_state[i]));
        }
 
        len += sprintf(buf + len, "Total transition : %u\n",
-                                       devfreq->stats.total_trans);
+                                       df->stats.total_trans);
        return len;
 }
 
@@ -1604,6 +1647,9 @@ static ssize_t trans_stat_store(struct device *dev,
        struct devfreq *df = to_devfreq(dev);
        int err, value;
 
+       if (!df->profile)
+               return -EINVAL;
+
        if (df->profile->max_state == 0)
                return count;
 
@@ -1625,6 +1671,69 @@ static ssize_t trans_stat_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(trans_stat);
 
+static ssize_t timer_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct devfreq *df = to_devfreq(dev);
+
+       if (!df->profile)
+               return -EINVAL;
+
+       return sprintf(buf, "%s\n", timer_name[df->profile->timer]);
+}
+
+static ssize_t timer_store(struct device *dev, struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+       struct devfreq *df = to_devfreq(dev);
+       char str_timer[DEVFREQ_NAME_LEN + 1];
+       int timer = -1;
+       int ret = 0, i;
+
+       if (!df->governor || !df->profile)
+               return -EINVAL;
+
+       ret = sscanf(buf, "%16s", str_timer);
+       if (ret != 1)
+               return -EINVAL;
+
+       for (i = 0; i < DEVFREQ_TIMER_NUM; i++) {
+               if (!strncmp(timer_name[i], str_timer, DEVFREQ_NAME_LEN)) {
+                       timer = i;
+                       break;
+               }
+       }
+
+       if (timer < 0) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (df->profile->timer == timer) {
+               ret = 0;
+               goto out;
+       }
+
+       mutex_lock(&df->lock);
+       df->profile->timer = timer;
+       mutex_unlock(&df->lock);
+
+       ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+       if (ret) {
+               dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+                        __func__, df->governor->name, ret);
+               goto out;
+       }
+
+       ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+       if (ret)
+               dev_warn(dev, "%s: Governor %s not started(%d)\n",
+                        __func__, df->governor->name, ret);
+out:
+       return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(timer);
+
 static struct attribute *devfreq_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_governor.attr,
@@ -1636,6 +1745,7 @@ static struct attribute *devfreq_attrs[] = {
        &dev_attr_min_freq.attr,
        &dev_attr_max_freq.attr,
        &dev_attr_trans_stat.attr,
+       &dev_attr_timer.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(devfreq);
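
The new "timer" attribute is writable from userspace. A minimal sketch (the
device path is hypothetical, and the accepted tokens are assumed to match the
timer_name[] strings introduced by this series, "deferrable" and "delayed"):

#include <stdio.h>

int main(void)
{
        /* Hypothetical devfreq device; real names depend on the platform */
        FILE *f = fopen("/sys/class/devfreq/soc:memory-controller/timer", "w");

        if (!f)
                return 1;
        fputs("delayed", f);            /* switch to the delayed work timer */
        return fclose(f) ? 1 : 0;
}
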
@@ -1657,8 +1767,7 @@ static int devfreq_summary_show(struct seq_file *s, void *data)
        unsigned long cur_freq, min_freq, max_freq;
        unsigned int polling_ms;
 
-       seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
-                       "dev_name",
+       seq_printf(s, "%-30s %-30s %-15s %10s %12s %12s %12s\n",
                        "dev",
                        "parent_dev",
                        "governor",
@@ -1666,10 +1775,9 @@ static int devfreq_summary_show(struct seq_file *s, void *data)
                        "cur_freq_Hz",
                        "min_freq_Hz",
                        "max_freq_Hz");
-       seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
+       seq_printf(s, "%30s %30s %15s %10s %12s %12s %12s\n",
+                       "------------------------------",
                        "------------------------------",
-                       "----------",
-                       "----------",
                        "---------------",
                        "----------",
                        "------------",
@@ -1692,14 +1800,13 @@ static int devfreq_summary_show(struct seq_file *s, void *data)
 #endif
 
                mutex_lock(&devfreq->lock);
-               cur_freq = devfreq->previous_freq,
+               cur_freq = devfreq->previous_freq;
                get_freq_range(devfreq, &min_freq, &max_freq);
-               polling_ms = devfreq->profile->polling_ms,
+               polling_ms = devfreq->profile->polling_ms;
                mutex_unlock(&devfreq->lock);
 
                seq_printf(s,
-                       "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
-                       dev_name(devfreq->dev.parent),
+                       "%-30s %-30s %-15s %10d %12ld %12ld %12ld\n",
                        dev_name(&devfreq->dev),
                        p_devfreq ? dev_name(&p_devfreq->dev) : "null",
                        devfreq->governor_name,
index 24f04f78285b7dd9ade185b07a8a26be96cf9d3e..027769e39f9b86211a8bfdfbd8a32dda3e9717e2 100644 (file)
@@ -95,18 +95,20 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
 
        mutex_lock(&dmcfreq->lock);
 
-       if (target_rate >= dmcfreq->odt_dis_freq)
-               odt_enable = true;
-
-       /*
-        * This makes a SMC call to the TF-A to set the DDR PD (power-down)
-        * timings and to enable or disable the ODT (on-die termination)
-        * resistors.
-        */
-       arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
-                     dmcfreq->odt_pd_arg1,
-                     ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
-                     odt_enable, 0, 0, 0, &res);
+       if (dmcfreq->regmap_pmu) {
+               if (target_rate >= dmcfreq->odt_dis_freq)
+                       odt_enable = true;
+
+               /*
+                * This makes an SMC call to the TF-A to set the DDR PD
+                * (power-down) timings and to enable or disable the
+                * ODT (on-die termination) resistors.
+                */
+               arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
+                             dmcfreq->odt_pd_arg1,
+                             ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
+                             odt_enable, 0, 0, 0, &res);
+       }
 
        /*
         * If frequency scaling from low to high, adjust voltage first.
@@ -371,13 +373,14 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
        }
 
        node = of_parse_phandle(np, "rockchip,pmu", 0);
-       if (node) {
-               data->regmap_pmu = syscon_node_to_regmap(node);
-               of_node_put(node);
-               if (IS_ERR(data->regmap_pmu)) {
-                       ret = PTR_ERR(data->regmap_pmu);
-                       goto err_edev;
-               }
+       if (!node)
+               goto no_pmu;
+
+       data->regmap_pmu = syscon_node_to_regmap(node);
+       of_node_put(node);
+       if (IS_ERR(data->regmap_pmu)) {
+               ret = PTR_ERR(data->regmap_pmu);
+               goto err_edev;
        }
 
        regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
@@ -399,6 +402,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
                goto err_edev;
        };
 
+no_pmu:
        arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
                      ROCKCHIP_SIP_CONFIG_DRAM_INIT,
                      0, 0, 0, 0, &res);
index f4495841bf68052ead29c40ac0b83092655b1428..fd0fa9e7900b4c4a7781bfe9ac8af7fc41221ce6 100644 (file)
@@ -66,8 +66,6 @@ static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 static unsigned long auto_demotion_disable_flags;
 static bool disable_promotion_to_c1e;
 
-static bool lapic_timer_always_reliable;
-
 struct idle_cpu {
        struct cpuidle_state *state_table;
 
@@ -142,7 +140,7 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
        if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
                leave_mm(cpu);
 
-       if (!static_cpu_has(X86_FEATURE_ARAT) && !lapic_timer_always_reliable) {
+       if (!static_cpu_has(X86_FEATURE_ARAT)) {
                /*
                 * Switch over to one-shot tick broadcast if the target C-state
                 * is deeper than C1.
@@ -175,13 +173,15 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
  * Invoked as a suspend-to-idle callback routine with frozen user space, frozen
  * scheduler tick and suspended scheduler clock on the target CPU.
  */
-static __cpuidle void intel_idle_s2idle(struct cpuidle_device *dev,
-                                       struct cpuidle_driver *drv, int index)
+static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
+                                      struct cpuidle_driver *drv, int index)
 {
        unsigned long eax = flg2MWAIT(drv->states[index].flags);
        unsigned long ecx = 1; /* break on interrupt flag */
 
        mwait_idle_with_hints(eax, ecx);
+
+       return 0;
 }
 
 /*
@@ -752,6 +752,35 @@ static struct cpuidle_state skx_cstates[] __initdata = {
                .enter = NULL }
 };
 
+static struct cpuidle_state icx_cstates[] __initdata = {
+       {
+               .name = "C1",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00),
+               .exit_latency = 1,
+               .target_residency = 1,
+               .enter = &intel_idle,
+               .enter_s2idle = intel_idle_s2idle, },
+       {
+               .name = "C1E",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+               .exit_latency = 4,
+               .target_residency = 4,
+               .enter = &intel_idle,
+               .enter_s2idle = intel_idle_s2idle, },
+       {
+               .name = "C6",
+               .desc = "MWAIT 0x20",
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 128,
+               .target_residency = 384,
+               .enter = &intel_idle,
+               .enter_s2idle = intel_idle_s2idle, },
+       {
+               .enter = NULL }
+};
+
 static struct cpuidle_state atom_cstates[] __initdata = {
        {
                .name = "C1E",
@@ -1056,6 +1085,12 @@ static const struct idle_cpu idle_cpu_skx __initconst = {
        .use_acpi = true,
 };
 
+static const struct idle_cpu idle_cpu_icx __initconst = {
+       .state_table = icx_cstates,
+       .disable_promotion_to_c1e = true,
+       .use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_avn __initconst = {
        .state_table = avn_cstates,
        .disable_promotion_to_c1e = true,
@@ -1110,6 +1145,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,          &idle_cpu_skl),
        X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,            &idle_cpu_skl),
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,           &idle_cpu_skx),
+       X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           &idle_cpu_icx),
        X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,        &idle_cpu_knl),
        X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,        &idle_cpu_knl),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,       &idle_cpu_bxt),
@@ -1562,7 +1598,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
 {
        struct cpuidle_device *dev;
 
-       if (!lapic_timer_always_reliable)
+       if (!boot_cpu_has(X86_FEATURE_ARAT))
                tick_broadcast_enable();
 
        /*
@@ -1655,16 +1691,13 @@ static int __init intel_idle_init(void)
                goto init_driver_fail;
        }
 
-       if (boot_cpu_has(X86_FEATURE_ARAT))     /* Always Reliable APIC Timer */
-               lapic_timer_always_reliable = true;
-
        retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
                                   intel_idle_cpu_online, NULL);
        if (retval < 0)
                goto hp_setup_fail;
 
        pr_debug("Local APIC timer is reliable in %s\n",
-                lapic_timer_always_reliable ? "all C-states" : "C1");
+                boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");
 
        return 0;
 
index 25196d6268e21d5d6d688619b84a5fa34754c7a9..53bfe6b7b703a2f7329d5f971b83ac32c7482479 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/of_device.h>
 #include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include "../jedec_ddr.h"
 #include "../of_memory.h"
 
+static int irqmode;
+module_param(irqmode, int, 0644);
+MODULE_PARM_DESC(irqmode, "Enable IRQ mode (0=off [default], 1=on)");
+
 #define EXYNOS5_DREXI_TIMINGAREF               (0x0030)
 #define EXYNOS5_DREXI_TIMINGROW0               (0x0034)
 #define EXYNOS5_DREXI_TIMINGDATA0              (0x0038)
@@ -945,6 +950,7 @@ static int exynos5_dmc_get_cur_freq(struct device *dev, unsigned long *freq)
  * It provides to the devfreq framework needed functions and polling period.
  */
 static struct devfreq_dev_profile exynos5_dmc_df_profile = {
+       .timer = DEVFREQ_TIMER_DELAYED,
        .target = exynos5_dmc_target,
        .get_dev_status = exynos5_dmc_get_status,
        .get_cur_freq = exynos5_dmc_get_cur_freq,
@@ -1427,7 +1433,7 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
        /* There are two modes in which the driver works: polling or IRQ */
        irq[0] = platform_get_irq_byname(pdev, "drex_0");
        irq[1] = platform_get_irq_byname(pdev, "drex_1");
-       if (irq[0] > 0 && irq[1] > 0) {
+       if (irq[0] > 0 && irq[1] > 0 && irqmode) {
                ret = devm_request_threaded_irq(dev, irq[0], NULL,
                                                dmc_irq_thread, IRQF_ONESHOT,
                                                dev_name(dev), dmc);
@@ -1465,10 +1471,10 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
                 * Setup default thresholds for the devfreq governor.
                 * The values are chosen based on experiments.
                 */
-               dmc->gov_data.upthreshold = 30;
+               dmc->gov_data.upthreshold = 10;
                dmc->gov_data.downdifferential = 5;
 
-               exynos5_dmc_df_profile.polling_ms = 500;
+               exynos5_dmc_df_profile.polling_ms = 100;
        }
 
 
@@ -1484,7 +1490,7 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
        if (dmc->in_irq_mode)
                exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
 
-       dev_info(dev, "DMC initialized\n");
+       dev_info(dev, "DMC initialized, in irq mode: %d\n", dmc->in_irq_mode);
 
        return 0;
 
index cba7a6fcd178cc4591d2a5715aa715b2b98ddf60..447552ac25c4a34386e815390d7fac66b418d99a 100644 (file)
@@ -1108,24 +1108,18 @@ static int jz4740_mmc_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-
-static int jz4740_mmc_suspend(struct device *dev)
+static int __maybe_unused jz4740_mmc_suspend(struct device *dev)
 {
        return pinctrl_pm_select_sleep_state(dev);
 }
 
-static int jz4740_mmc_resume(struct device *dev)
+static int __maybe_unused jz4740_mmc_resume(struct device *dev)
 {
        return pinctrl_select_default_state(dev);
 }
 
 static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
        jz4740_mmc_resume);
-#define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)
-#else
-#define JZ4740_MMC_PM_OPS NULL
-#endif
 
 static struct platform_driver jz4740_mmc_driver = {
        .probe = jz4740_mmc_probe,
@@ -1133,7 +1127,7 @@ static struct platform_driver jz4740_mmc_driver = {
        .driver = {
                .name = "jz4740-mmc",
                .of_match_table = of_match_ptr(jz4740_mmc_of_match),
-               .pm = JZ4740_MMC_PM_OPS,
+               .pm = pm_ptr(&jz4740_mmc_pm_ops),
        },
 };
 
index dfbd3d10410ca47ab76f890ed84c2840b746cca4..0c8c74a3c868e3f2aec37bf5445972591ef53777 100644 (file)
@@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  */
 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
 {
-       if (IS_ERR_OR_NULL(opp) || !opp->available) {
+       if (IS_ERR_OR_NULL(opp)) {
                pr_err("%s: Invalid parameters\n", __func__);
                return 0;
        }
@@ -2271,6 +2271,7 @@ adjust_put_table:
        dev_pm_opp_put_opp_table(opp_table);
        return r;
 }
+EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
 
 /**
  * dev_pm_opp_enable() - Enable a specific OPP
index 314f306140a1cc13a46b1ac302351111cddc108f..0430290670ab39b55f69d2e16058a50d8f93e08d 100644 (file)
@@ -1209,20 +1209,19 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
 
 /*
  * Callback function provided to the Energy Model framework upon registration.
- * This computes the power estimated by @CPU at @kHz if it is the frequency
+ * This computes the estimated power of @dev at @kHz if it is the frequency
  * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
  * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
  * frequency and @mW to the associated power. The power is estimated as
- * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively
- * the voltage and frequency of the OPP.
+ * P = C * V^2 * f with C being the device's capacitance and V and f
+ * respectively the voltage and frequency of the OPP.
  *
- * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power
- * calculation failed because of missing parameters, 0 otherwise.
+ * Returns -EINVAL if the power calculation failed because of missing
+ * parameters, 0 otherwise.
  */
-static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
-                                        int cpu)
+static int __maybe_unused _get_power(unsigned long *mW, unsigned long *kHz,
+                                    struct device *dev)
 {
-       struct device *cpu_dev;
        struct dev_pm_opp *opp;
        struct device_node *np;
        unsigned long mV, Hz;
@@ -1230,11 +1229,7 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
        u64 tmp;
        int ret;
 
-       cpu_dev = get_cpu_device(cpu);
-       if (!cpu_dev)
-               return -ENODEV;
-
-       np = of_node_get(cpu_dev->of_node);
+       np = of_node_get(dev->of_node);
        if (!np)
                return -EINVAL;
 
@@ -1244,7 +1239,7 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
                return -EINVAL;
 
        Hz = *kHz * 1000;
-       opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+       opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
        if (IS_ERR(opp))
                return -EINVAL;
 
@@ -1264,30 +1259,38 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
 
 /**
  * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
- * @cpus       : CPUs for which an Energy Model has to be registered
+ * @dev                : Device for which an Energy Model has to be registered
+ * @cpus       : CPUs for which an Energy Model has to be registered. For
+ *             other types of devices it should be set to NULL.
  *
  * This checks whether the "dynamic-power-coefficient" devicetree property has
  * been specified, and tries to register an Energy Model with it if it has.
+ * Having this property means that the OPP voltages are known, so the EM
+ * can be computed.
  */
-void dev_pm_opp_of_register_em(struct cpumask *cpus)
+int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
 {
-       struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power);
-       int ret, nr_opp, cpu = cpumask_first(cpus);
-       struct device *cpu_dev;
+       struct em_data_callback em_cb = EM_DATA_CB(_get_power);
        struct device_node *np;
+       int ret, nr_opp;
        u32 cap;
 
-       cpu_dev = get_cpu_device(cpu);
-       if (!cpu_dev)
-               return;
+       if (IS_ERR_OR_NULL(dev)) {
+               ret = -EINVAL;
+               goto failed;
+       }
 
-       nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
-       if (nr_opp <= 0)
-               return;
+       nr_opp = dev_pm_opp_get_opp_count(dev);
+       if (nr_opp <= 0) {
+               ret = -EINVAL;
+               goto failed;
+       }
 
-       np = of_node_get(cpu_dev->of_node);
-       if (!np)
-               return;
+       np = of_node_get(dev->of_node);
+       if (!np) {
+               ret = -EINVAL;
+               goto failed;
+       }
 
        /*
         * Register an EM only if the 'dynamic-power-coefficient' property is
@@ -1298,9 +1301,20 @@ void dev_pm_opp_of_register_em(struct cpumask *cpus)
         */
        ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
        of_node_put(np);
-       if (ret || !cap)
-               return;
+       if (ret || !cap) {
+               dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
+               ret = -EINVAL;
+               goto failed;
+       }
+
+       ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus);
+       if (ret)
+               goto failed;
 
-       em_register_perf_domain(cpus, nr_opp, &em_cb);
+       return 0;
+
+failed:
+       dev_dbg(dev, "Couldn't register Energy Model: %d\n", ret);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
index e3357e91decb252957d7ae535a5745fb89c91fc7..bd4771f388ab3656c3c251d972bd40054b5cb23b 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - https://www.ti.com/
  *     Nishanth Menon <nm@ti.com>
  *     Dave Gerlach <d-gerlach@ti.com>
  *
index c90f0990968bebf69d7b00e6c5484302982f0e62..597733ed86e9034e2e6ec6d3b58b40b14c5c8117 100644 (file)
@@ -19,8 +19,8 @@
  * The idle + run duration is specified via separate helpers and that allows
  * idle injection to be started.
  *
- * The idle injection kthreads will call play_idle() with the idle duration
- * specified as per the above.
+ * The idle injection kthreads will call play_idle_precise() with the idle
+ * duration and max allowed latency specified as per the above.
  *
  * After all of them have been woken up, a timer is set to start the next idle
  * injection cycle.
@@ -100,7 +100,7 @@ static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
  *
  * This function is called when the idle injection timer expires.  It wakes up
  * idle injection tasks associated with the timer and they, in turn, invoke
- * play_idle() to inject a specified amount of CPU idle time.
+ * play_idle_precise() to inject a specified amount of CPU idle time.
  *
  * Return: HRTIMER_RESTART.
  */
@@ -124,8 +124,8 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
  * idle_inject_fn - idle injection work function
  * @cpu: the CPU owning the task
  *
- * This function calls play_idle() to inject a specified amount of CPU idle
- * time.
+ * This function calls play_idle_precise() to inject a specified amount of CPU
+ * idle time.
  */
 static void idle_inject_fn(unsigned int cpu)
 {
index 61a63a16b5e7d068f375b7af6a3c3f46f0e61c2e..6f55aaef8afc993dec7c82ac7a5473eb94ef5c74 100644 (file)
@@ -39,6 +39,8 @@
 #define POWER_HIGH_LOCK         BIT_ULL(63)
 #define POWER_LOW_LOCK          BIT(31)
 
+#define POWER_LIMIT4_MASK              0x1FFF
+
 #define TIME_WINDOW1_MASK       (0x7FULL<<17)
 #define TIME_WINDOW2_MASK       (0x7FULL<<49)
 
@@ -82,6 +84,7 @@ enum unit_type {
 
 static const char pl1_name[] = "long_term";
 static const char pl2_name[] = "short_term";
+static const char pl4_name[] = "peak_power";
 
 #define power_zone_to_rapl_domain(_zone) \
        container_of(_zone, struct rapl_domain, power_zone)
@@ -93,6 +96,7 @@ struct rapl_defaults {
        u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
                                    bool to_raw);
        unsigned int dram_domain_energy_unit;
+       unsigned int psys_domain_energy_unit;
 };
 static struct rapl_defaults *rapl_defaults;
 
@@ -337,6 +341,9 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
        case PL2_ENABLE:
                rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
                break;
+       case PL4_ENABLE:
+               rapl_write_data_raw(rd, POWER_LIMIT4, power_limit);
+               break;
        default:
                ret = -EINVAL;
        }
@@ -371,6 +378,9 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
        case PL2_ENABLE:
                prim = POWER_LIMIT2;
                break;
+       case PL4_ENABLE:
+               prim = POWER_LIMIT4;
+               break;
        default:
                put_online_cpus();
                return -EINVAL;
@@ -440,6 +450,13 @@ static int get_time_window(struct powercap_zone *power_zone, int cid,
        case PL2_ENABLE:
                ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
                break;
+       case PL4_ENABLE:
+               /*
+                * The time window parameter is not applicable to the PL4
+                * entry, so assign '0' as the default value.
+                */
+               val = 0;
+               break;
        default:
                put_online_cpus();
                return -EINVAL;
@@ -483,6 +500,9 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
        case PL2_ENABLE:
                prim = MAX_POWER;
                break;
+       case PL4_ENABLE:
+               prim = MAX_POWER;
+               break;
        default:
                put_online_cpus();
                return -EINVAL;
@@ -492,6 +512,10 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
        else
                *data = val;
 
+       /* As a rule of thumb, PL4 is around two times PL2. */
+       if (rd->rpl[id].prim_id == PL4_ENABLE)
+               *data = *data * 2;
+
        put_online_cpus();
 
        return ret;
@@ -524,21 +548,42 @@ static void rapl_init_domains(struct rapl_package *rp)
                rd->id = i;
                rd->rpl[0].prim_id = PL1_ENABLE;
                rd->rpl[0].name = pl1_name;
-               /* some domain may support two power limits */
-               if (rp->priv->limits[i] == 2) {
+
+               /*
+                * The PL2 power limit is present whenever the domain
+                * supports two or three limits.
+                */
+               if (rp->priv->limits[i] >= 2) {
                        rd->rpl[1].prim_id = PL2_ENABLE;
                        rd->rpl[1].name = pl2_name;
                }
 
+               /* Add the PL4 entry when the domain supports three limits */
+               if (rp->priv->limits[i] == 3) {
+                       rd->rpl[2].prim_id = PL4_ENABLE;
+                       rd->rpl[2].name = pl4_name;
+               }
+
                for (j = 0; j < RAPL_DOMAIN_REG_MAX; j++)
                        rd->regs[j] = rp->priv->regs[i][j];
 
-               if (i == RAPL_DOMAIN_DRAM) {
+               switch (i) {
+               case RAPL_DOMAIN_DRAM:
                        rd->domain_energy_unit =
                            rapl_defaults->dram_domain_energy_unit;
                        if (rd->domain_energy_unit)
                                pr_info("DRAM domain energy unit %dpj\n",
                                        rd->domain_energy_unit);
+                       break;
+               case RAPL_DOMAIN_PLATFORM:
+                       rd->domain_energy_unit =
+                           rapl_defaults->psys_domain_energy_unit;
+                       if (rd->domain_energy_unit)
+                               pr_info("Platform domain energy unit %dpj\n",
+                                       rd->domain_energy_unit);
+                       break;
+               default:
+                       break;
                }
                rd++;
        }
@@ -587,6 +632,8 @@ static struct rapl_primitive_info rpi[] = {
                            RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
        PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
                            RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(POWER_LIMIT4, POWER_LIMIT4_MASK, 0,
+                               RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0),
        PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31,
                            RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
        PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
@@ -597,6 +644,8 @@ static struct rapl_primitive_info rpi[] = {
                            RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
        PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
                            RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0,
+                               RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
        PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
                            RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
        PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
@@ -919,6 +968,14 @@ static const struct rapl_defaults rapl_defaults_hsw_server = {
        .dram_domain_energy_unit = 15300,
 };
 
+static const struct rapl_defaults rapl_defaults_spr_server = {
+       .check_unit = rapl_check_unit_core,
+       .set_floor_freq = set_floor_freq_default,
+       .compute_time_window = rapl_compute_time_window_core,
+       .dram_domain_energy_unit = 15300,
+       .psys_domain_energy_unit = 1000000000,
+};
+
 static const struct rapl_defaults rapl_defaults_byt = {
        .floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_BYT,
        .check_unit = rapl_check_unit_atom,
@@ -978,6 +1035,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,         &rapl_defaults_core),
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,           &rapl_defaults_core),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         &rapl_defaults_core),
+       X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &rapl_defaults_spr_server),
 
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,     &rapl_defaults_byt),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,        &rapl_defaults_cht),
@@ -1252,6 +1310,7 @@ void rapl_remove_package(struct rapl_package *rp)
                if (find_nr_power_limit(rd) > 1) {
                        rapl_write_data_raw(rd, PL2_ENABLE, 0);
                        rapl_write_data_raw(rd, PL2_CLAMP, 0);
+                       rapl_write_data_raw(rd, PL4_ENABLE, 0);
                }
                if (rd->id == RAPL_DOMAIN_PACKAGE) {
                        rd_package = rd;
@@ -1360,6 +1419,13 @@ static void power_limit_state_save(void)
                                if (ret)
                                        rd->rpl[i].last_power_limit = 0;
                                break;
+                       case PL4_ENABLE:
+                               ret = rapl_read_data_raw(rd,
+                                                POWER_LIMIT4, true,
+                                                &rd->rpl[i].last_power_limit);
+                               if (ret)
+                                       rd->rpl[i].last_power_limit = 0;
+                               break;
                        }
                }
        }
@@ -1390,6 +1456,11 @@ static void power_limit_state_restore(void)
                                        rapl_write_data_raw(rd, POWER_LIMIT2,
                                            rd->rpl[i].last_power_limit);
                                break;
+                       case PL4_ENABLE:
+                               if (rd->rpl[i].last_power_limit)
+                                       rapl_write_data_raw(rd, POWER_LIMIT4,
+                                           rd->rpl[i].last_power_limit);
+                               break;
                        }
                }
        }
index d5487965bdfe9d743ab0ab8c26d1e5f29ff0997f..d2a2627507a96a3022f5c0d49e09a059a4c79f1c 100644 (file)
@@ -28,6 +28,7 @@
 
 /* Local defines */
 #define MSR_PLATFORM_POWER_LIMIT       0x0000065C
+#define MSR_VR_CURRENT_CONFIG          0x00000601
 
 /* private data for RAPL MSR Interface */
 static struct rapl_if_priv rapl_msr_priv = {
@@ -123,13 +124,27 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
        return ra->err;
 }
 
+/* List of verified CPUs. */
+static const struct x86_cpu_id pl4_support_ids[] = {
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY },
+       {}
+};
+
 static int rapl_msr_probe(struct platform_device *pdev)
 {
+       const struct x86_cpu_id *id = x86_match_cpu(pl4_support_ids);
        int ret;
 
        rapl_msr_priv.read_raw = rapl_msr_read_raw;
        rapl_msr_priv.write_raw = rapl_msr_write_raw;
 
+       if (id) {
+               rapl_msr_priv.limits[RAPL_DOMAIN_PACKAGE] = 3;
+               rapl_msr_priv.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4] =
+                       MSR_VR_CURRENT_CONFIG;
+               pr_info("PL4 support detected.\n");
+       }
+
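
With three limits advertised, the powercap core exposes PL4 as the third
constraint of the package zone. A userspace sketch (the zone path and the
"peak_power" constraint name are assumptions based on this series):

#include <stdio.h>

int main(void)
{
        char name[32];
        /* Assumed zone path; enumerate /sys/class/powercap on a real system */
        FILE *f = fopen("/sys/class/powercap/intel-rapl:0/constraint_2_name", "r");

        if (f && fgets(name, sizeof(name), f))
                printf("constraint 2: %s", name);       /* expected: "peak_power" */
        if (f)
                fclose(f);
        return 0;
}
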
        rapl_msr_priv.control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
        if (IS_ERR(rapl_msr_priv.control_type)) {
                pr_debug("failed to register powercap control_type.\n");
index 6c0e1b053126e0b7490f71dca943ca6f64609721..6cf23a54e85366bdaa5138c1c0167a07de8a7229 100644 (file)
@@ -333,18 +333,18 @@ static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
                return false;
 
        policy = cpufreq_cdev->policy;
-       if (!cpumask_equal(policy->related_cpus, to_cpumask(em->cpus))) {
+       if (!cpumask_equal(policy->related_cpus, em_span_cpus(em))) {
                pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n",
-                       cpumask_pr_args(to_cpumask(em->cpus)),
+                       cpumask_pr_args(em_span_cpus(em)),
                        cpumask_pr_args(policy->related_cpus));
                return false;
        }
 
        nr_levels = cpufreq_cdev->max_level + 1;
-       if (em->nr_cap_states != nr_levels) {
-               pr_err("The number of cap states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
-                       cpumask_pr_args(to_cpumask(em->cpus)),
-                       em->nr_cap_states, nr_levels);
+       if (em_pd_nr_perf_states(em) != nr_levels) {
+               pr_err("The number of performance states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
+                       cpumask_pr_args(em_span_cpus(em)),
+                       em_pd_nr_perf_states(em), nr_levels);
                return false;
        }
 
index 2e2dac29a9e91507c9f122134a3f8808341f5248..8963062da57ec1e895cf6f746c48101558e7c43b 100644 (file)
@@ -414,7 +414,7 @@ static int nfs4_delay_interruptible(long *timeout)
 {
        might_sleep();
 
-       freezable_schedule_timeout_interruptible(nfs4_update_delay(timeout));
+       freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout));
        if (!signal_pending(current))
                return 0;
        return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS;
index 3494f6763597e7992af3e2afaa74cee471ce96c3..e62b022cb07e5786dafefc745c70ce8354569148 100644 (file)
@@ -577,6 +577,20 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 
+#define cpufreq_governor_init(__governor)                      \
+static int __init __governor##_init(void)                      \
+{                                                              \
+       return cpufreq_register_governor(&__governor);  \
+}                                                              \
+core_initcall(__governor##_init)
+
+#define cpufreq_governor_exit(__governor)                      \
+static void __exit __governor##_exit(void)                     \
+{                                                              \
+       return cpufreq_unregister_governor(&__governor);        \
+}                                                              \
+module_exit(__governor##_exit)
+
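
As a sketch, a trivial governor module can now reduce its registration
boilerplate to the following (hypothetical governor, callbacks omitted):

static struct cpufreq_governor cpufreq_gov_example = {
        .name   = "example",
        .owner  = THIS_MODULE,
};

cpufreq_governor_init(cpufreq_gov_example);
cpufreq_governor_exit(cpufreq_gov_example);
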
 struct cpufreq_governor *cpufreq_default_governor(void);
 struct cpufreq_governor *cpufreq_fallback_governor(void);
 
index ec2ef63771f08944f06fe597af058a0ba118a7e4..b65909ae4e201131a39367fdb593296bef568aca 100644 (file)
@@ -65,10 +65,13 @@ struct cpuidle_state {
         * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
         * suspended, so it must not re-enable interrupts at any point (even
         * temporarily) or attempt to change states of clock event devices.
+        *
+        * This callback may point to the same function as ->enter if that
+        * function meets all of the above requirements.
         */
-       void (*enter_s2idle) (struct cpuidle_device *dev,
-                             struct cpuidle_driver *drv,
-                             int index);
+       int (*enter_s2idle)(struct cpuidle_device *dev,
+                           struct cpuidle_driver *drv,
+                           int index);
 };
 
 /* Idle State Flags */
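
Since ->enter_s2idle now returns int like ->enter, a driver whose regular
enter callback already satisfies the s2idle constraints can reuse it for both
hooks. A minimal sketch with a hypothetical driver:

static int foo_enter(struct cpuidle_device *dev,
                     struct cpuidle_driver *drv, int index)
{
        /* Must not re-enable interrupts or touch clock event devices */
        return index;
}

static struct cpuidle_state foo_state = {
        .name           = "C1",
        .enter          = foo_enter,
        .enter_s2idle   = foo_enter,
};
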
index 57e871a559a9aa27b4ffeb0048c78efd94ba5acc..12782fbb4c25ebe2f800f8cd74eb77784f30ddfd 100644 (file)
 #define        DEVFREQ_PRECHANGE               (0)
 #define DEVFREQ_POSTCHANGE             (1)
 
+/* DEVFREQ work timers */
+enum devfreq_timer {
+       DEVFREQ_TIMER_DEFERRABLE = 0,
+       DEVFREQ_TIMER_DELAYED,
+       DEVFREQ_TIMER_NUM,
+};
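
Drivers opt in through their profile; DEVFREQ_TIMER_DEFERRABLE is value 0, so
zero-initialized profiles keep today's behavior. A sketch with hypothetical
callbacks:

static struct devfreq_dev_profile foo_profile = {
        .initial_freq   = 400000000,
        .polling_ms     = 100,
        .timer          = DEVFREQ_TIMER_DELAYED,        /* fires even on idle CPUs */
        .target         = foo_target,
        .get_dev_status = foo_get_status,
};
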
+
 struct devfreq;
 struct devfreq_governor;
 
@@ -70,6 +77,7 @@ struct devfreq_dev_status {
  * @initial_freq:      The operating frequency when devfreq_add_device() is
  *                     called.
  * @polling_ms:                The polling interval in ms. 0 disables polling.
+ * @timer:             The timer type: either a deferrable or a delayed timer.
  * @target:            The device should set its operating frequency at
  *                     freq or lowest-upper-than-freq value. If freq is
  *                     higher than any operable frequency, set maximum.
@@ -96,6 +104,7 @@ struct devfreq_dev_status {
 struct devfreq_dev_profile {
        unsigned long initial_freq;
        unsigned int polling_ms;
+       enum devfreq_timer timer;
 
        int (*target)(struct device *dev, unsigned long *freq, u32 flags);
        int (*get_dev_status)(struct device *dev,
index 5efed864b3871d3f34391fc605ea2bffec79c5a8..4e2e9d3a2eda15afe1cce692d3cd4959fabc6ae1 100644 (file)
@@ -13,6 +13,7 @@
 #define _DEVICE_H_
 
 #include <linux/dev_printk.h>
+#include <linux/energy_model.h>
 #include <linux/ioport.h>
 #include <linux/kobject.h>
 #include <linux/klist.h>
@@ -560,6 +561,10 @@ struct device {
        struct dev_pm_info      power;
        struct dev_pm_domain    *pm_domain;
 
+#ifdef CONFIG_ENERGY_MODEL
+       struct em_perf_domain   *em_pd;
+#endif
+
 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
        struct irq_domain       *msi_domain;
 #endif
index ade6486a3382486752868509838da30da7134f68..b67a51c574b97501a5d5abe5e93640b8e8e3362a 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _LINUX_ENERGY_MODEL_H
 #define _LINUX_ENERGY_MODEL_H
 #include <linux/cpumask.h>
+#include <linux/device.h>
 #include <linux/jump_label.h>
 #include <linux/kobject.h>
 #include <linux/rcupdate.h>
 #include <linux/types.h>
 
 /**
- * em_cap_state - Capacity state of a performance domain
- * @frequency: The CPU frequency in KHz, for consistency with CPUFreq
- * @power:     The power consumed by 1 CPU at this level, in milli-watts
+ * em_perf_state - Performance state of a performance domain
+ * @frequency: The frequency in KHz, for consistency with CPUFreq
+ * @power:     The power consumed at this level, in milli-watts (by 1 CPU or
+ *             by a registered device). It can be a total power, i.e. both
+ *             static and dynamic.
  * @cost:      The cost coefficient associated with this level, used during
  *             energy calculation. Equal to: power * max_frequency / frequency
  */
-struct em_cap_state {
+struct em_perf_state {
        unsigned long frequency;
        unsigned long power;
        unsigned long cost;
@@ -24,102 +27,119 @@ struct em_cap_state {
 
 /**
  * em_perf_domain - Performance domain
- * @table:             List of capacity states, in ascending order
- * @nr_cap_states:     Number of capacity states
- * @cpus:              Cpumask covering the CPUs of the domain
+ * @table:             List of performance states, in ascending order
+ * @nr_perf_states:    Number of performance states
+ * @cpus:              Cpumask covering the CPUs of the domain. It's here
+ *                     for performance reasons, to avoid potential cache
+ *                     misses during energy calculations in the scheduler;
+ *                     it also simplifies allocating/freeing that memory.
  *
- * A "performance domain" represents a group of CPUs whose performance is
- * scaled together. All CPUs of a performance domain must have the same
- * micro-architecture. Performance domains often have a 1-to-1 mapping with
- * CPUFreq policies.
+ * In the case of a CPU device, a "performance domain" represents a group of
+ * CPUs whose performance is scaled together. All CPUs of a performance
+ * domain must have the same micro-architecture. Performance domains often
+ * have a 1-to-1 mapping with CPUFreq policies. For other devices the @cpus
+ * field is unused.
  */
 struct em_perf_domain {
-       struct em_cap_state *table;
-       int nr_cap_states;
+       struct em_perf_state *table;
+       int nr_perf_states;
        unsigned long cpus[];
 };
 
+#define em_span_cpus(em) (to_cpumask((em)->cpus))
+
 #ifdef CONFIG_ENERGY_MODEL
-#define EM_CPU_MAX_POWER 0xFFFF
+#define EM_MAX_POWER 0xFFFF
 
 struct em_data_callback {
        /**
-        * active_power() - Provide power at the next capacity state of a CPU
-        * @power       : Active power at the capacity state in mW (modified)
-        * @freq        : Frequency at the capacity state in kHz (modified)
-        * @cpu         : CPU for which we do this operation
+        * active_power() - Provide power at the next performance state of
+        *              a device
+        * @power       : Active power at the performance state in mW
+        *              (modified)
+        * @freq        : Frequency at the performance state in kHz
+        *              (modified)
+        * @dev         : Device for which we do this operation (can be a CPU)
         *
-        * active_power() must find the lowest capacity state of 'cpu' above
+        * active_power() must find the lowest performance state of 'dev' above
         * 'freq' and update 'power' and 'freq' to the matching active power
         * and frequency.
         *
-        * The power is the one of a single CPU in the domain, expressed in
-        * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER]
-        * range.
+        * For CPUs, the power is that of a single CPU in the domain,
+        * expressed in milli-watts. It is expected to fit in the
+        * [0, EM_MAX_POWER] range.
         *
         * Return 0 on success.
         */
-       int (*active_power)(unsigned long *power, unsigned long *freq, int cpu);
+       int (*active_power)(unsigned long *power, unsigned long *freq,
+                           struct device *dev);
 };
 #define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
 
 struct em_perf_domain *em_cpu_get(int cpu);
-int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
-                                               struct em_data_callback *cb);
+struct em_perf_domain *em_pd_get(struct device *dev);
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+                               struct em_data_callback *cb, cpumask_t *span);
+void em_dev_unregister_perf_domain(struct device *dev);
 
 /**
- * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain
+ * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
+ *             performance domain
  * @pd         : performance domain for which energy has to be estimated
  * @max_util   : highest utilization among CPUs of the domain
  * @sum_util   : sum of the utilization of all CPUs in the domain
  *
+ * This function must be used only for CPU devices. It does not validate
+ * that the EM is of CPU type or that its cpumask is allocated: it is
+ * called from the scheduler code quite frequently, so such checks are
+ * deliberately omitted.
+ *
  * Return: the sum of the energy consumed by the CPUs of the domain assuming
  * a capacity state satisfying the max utilization of the domain.
  */
-static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
                                unsigned long max_util, unsigned long sum_util)
 {
        unsigned long freq, scale_cpu;
-       struct em_cap_state *cs;
+       struct em_perf_state *ps;
        int i, cpu;
 
        /*
-        * In order to predict the capacity state, map the utilization of the
-        * most utilized CPU of the performance domain to a requested frequency,
-        * like schedutil.
+        * In order to predict the performance state, map the utilization of
+        * the most utilized CPU of the performance domain to a requested
+        * frequency, like schedutil.
         */
        cpu = cpumask_first(to_cpumask(pd->cpus));
        scale_cpu = arch_scale_cpu_capacity(cpu);
-       cs = &pd->table[pd->nr_cap_states - 1];
-       freq = map_util_freq(max_util, cs->frequency, scale_cpu);
+       ps = &pd->table[pd->nr_perf_states - 1];
+       freq = map_util_freq(max_util, ps->frequency, scale_cpu);
 
        /*
-        * Find the lowest capacity state of the Energy Model above the
+        * Find the lowest performance state of the Energy Model above the
         * requested frequency.
         */
-       for (i = 0; i < pd->nr_cap_states; i++) {
-               cs = &pd->table[i];
-               if (cs->frequency >= freq)
+       for (i = 0; i < pd->nr_perf_states; i++) {
+               ps = &pd->table[i];
+               if (ps->frequency >= freq)
                        break;
        }
 
        /*
-        * The capacity of a CPU in the domain at that capacity state (cs)
+        * The capacity of a CPU in the domain at the performance state (ps)
         * can be computed as:
         *
-        *             cs->freq * scale_cpu
-        *   cs->cap = --------------------                          (1)
+        *             ps->freq * scale_cpu
+        *   ps->cap = --------------------                          (1)
         *                 cpu_max_freq
         *
         * So, ignoring the costs of idle states (which are not available in
-        * the EM), the energy consumed by this CPU at that capacity state is
-        * estimated as:
+        * the EM), the energy consumed by this CPU at that performance state
+        * is estimated as:
         *
-        *             cs->power * cpu_util
+        *             ps->power * cpu_util
         *   cpu_nrg = --------------------                          (2)
-        *                   cs->cap
+        *                   ps->cap
         *
-        * since 'cpu_util / cs->cap' represents its percentage of busy time.
+        * since 'cpu_util / ps->cap' represents its percentage of busy time.
         *
         *   NOTE: Although the result of this computation actually is in
         *         units of power, it can be manipulated as an energy value
@@ -129,55 +149,64 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
         * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
         * of two terms:
         *
-        *             cs->power * cpu_max_freq   cpu_util
+        *             ps->power * cpu_max_freq   cpu_util
         *   cpu_nrg = ------------------------ * ---------          (3)
-        *                    cs->freq            scale_cpu
+        *                    ps->freq            scale_cpu
         *
-        * The first term is static, and is stored in the em_cap_state struct
-        * as 'cs->cost'.
+        * The first term is static, and is stored in the em_perf_state struct
+        * as 'ps->cost'.
         *
         * Since all CPUs of the domain have the same micro-architecture, they
-        * share the same 'cs->cost', and the same CPU capacity. Hence, the
+        * share the same 'ps->cost', and the same CPU capacity. Hence, the
         * total energy of the domain (which is the simple sum of the energy of
         * all of its CPUs) can be factorized as:
         *
-        *            cs->cost * \Sum cpu_util
+        *            ps->cost * \Sum cpu_util
         *   pd_nrg = ------------------------                       (4)
         *                  scale_cpu
         */
-       return cs->cost * sum_util / scale_cpu;
+       return ps->cost * sum_util / scale_cpu;
 }
 
 /**
- * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain
+ * em_pd_nr_perf_states() - Get the number of performance states of a perf.
+ *                             domain
  * @pd         : performance domain for which this must be done
  *
- * Return: the number of capacity states in the performance domain table
+ * Return: the number of performance states in the performance domain table
  */
-static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
 {
-       return pd->nr_cap_states;
+       return pd->nr_perf_states;
 }
 
 #else
 struct em_data_callback {};
 #define EM_DATA_CB(_active_power_cb) { }
 
-static inline int em_register_perf_domain(cpumask_t *span,
-                       unsigned int nr_states, struct em_data_callback *cb)
+static inline
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+                               struct em_data_callback *cb, cpumask_t *span)
 {
        return -EINVAL;
 }
+static inline void em_dev_unregister_perf_domain(struct device *dev)
+{
+}
 static inline struct em_perf_domain *em_cpu_get(int cpu)
 {
        return NULL;
 }
-static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+static inline struct em_perf_domain *em_pd_get(struct device *dev)
+{
+       return NULL;
+}
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
                        unsigned long max_util, unsigned long sum_util)
 {
        return 0;
 }
-static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
 {
        return 0;
 }
index 21f5aa0b217f3c0ce3c04a7d459f7a2e44e93076..27828145ca0985d77d37123602ce0ff01a7a7145 100644 (file)
@@ -207,6 +207,17 @@ static inline long freezable_schedule_timeout_interruptible(long timeout)
        return __retval;
 }
 
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
+{
+       long __retval;
+
+       freezer_do_not_count();
+       __retval = schedule_timeout_interruptible(timeout);
+       freezer_count_unsafe();
+       return __retval;
+}
+
 /* Like schedule_timeout_killable(), but should not block the freezer. */
 static inline long freezable_schedule_timeout_killable(long timeout)
 {
@@ -285,6 +296,9 @@ static inline void set_freezable(void) {}
 #define freezable_schedule_timeout_interruptible(timeout)              \
        schedule_timeout_interruptible(timeout)
 
+#define freezable_schedule_timeout_interruptible_unsafe(timeout)       \
+       schedule_timeout_interruptible(timeout)
+
 #define freezable_schedule_timeout_killable(timeout)                   \
        schedule_timeout_killable(timeout)
 
index efb3ce892c20dc1f87d35abb43a5c32bbe6b0b17..3582176a1eca1ce7d17c4a1eaa33a8b65fb81b76 100644 (file)
@@ -29,6 +29,7 @@ enum rapl_domain_reg_id {
        RAPL_DOMAIN_REG_PERF,
        RAPL_DOMAIN_REG_POLICY,
        RAPL_DOMAIN_REG_INFO,
+       RAPL_DOMAIN_REG_PL4,
        RAPL_DOMAIN_REG_MAX,
 };
 
@@ -38,12 +39,14 @@ enum rapl_primitives {
        ENERGY_COUNTER,
        POWER_LIMIT1,
        POWER_LIMIT2,
+       POWER_LIMIT4,
        FW_LOCK,
 
        PL1_ENABLE,             /* power limit 1, aka long term */
        PL1_CLAMP,              /* allow frequency to go below OS request */
        PL2_ENABLE,             /* power limit 2, aka short term, instantaneous */
        PL2_CLAMP,
+       PL4_ENABLE,             /* power limit 4, aka max peak power */
 
        TIME_WINDOW1,           /* long term */
        TIME_WINDOW2,           /* short term */
@@ -65,7 +68,7 @@ struct rapl_domain_data {
        unsigned long timestamp;
 };
 
-#define NR_POWER_LIMITS (2)
+#define NR_POWER_LIMITS (3)
 struct rapl_power_limit {
        struct powercap_zone_constraint *constraint;
        int prim_id;            /* primitive ID used to enable */
index 121c104a4090e9b9c505a5199c6e0018e7e2269c..a30a4b54df528b9d7a27a26ca74aea11124f67da 100644 (file)
@@ -351,7 +351,7 @@ struct dev_pm_ops {
  * to RAM and hibernation.
  */
 #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-const struct dev_pm_ops name = { \
+const struct dev_pm_ops __maybe_unused name = { \
        SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
 }
 
@@ -369,11 +369,17 @@ const struct dev_pm_ops name = { \
  * .runtime_resume(), respectively (and analogously for hibernation).
  */
 #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
-const struct dev_pm_ops name = { \
+const struct dev_pm_ops __maybe_unused name = { \
        SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
        SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
 }
 
+#ifdef CONFIG_PM
+#define pm_ptr(_ptr) (_ptr)
+#else
+#define pm_ptr(_ptr) NULL
+#endif
+
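
Together with the __maybe_unused annotations above, pm_ptr() lets drivers drop
their #ifdef CONFIG_PM_SLEEP blocks, as the jz4740 MMC change in this merge
does. A generic sketch (hypothetical "foo" platform driver):

static int __maybe_unused foo_suspend(struct device *dev)
{
        return 0;       /* quiesce the hardware here */
}

static int __maybe_unused foo_resume(struct device *dev)
{
        return 0;       /* re-initialize the hardware here */
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
        .driver = {
                .name   = "foo",
                .pm     = pm_ptr(&foo_pm_ops),  /* NULL when !CONFIG_PM */
        },
};
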
 /*
  * PM_EVENT_ messages
  *
index 9ec78ee53652e009c33ead0e36244612616f4f40..ee11502a575b0ecaf65439a814da2bd71e0ef053 100644 (file)
@@ -95,8 +95,8 @@ struct generic_pm_domain {
        struct device dev;
        struct dev_pm_domain domain;    /* PM domain operations */
        struct list_head gpd_list_node; /* Node in the global PM domains list */
-       struct list_head master_links;  /* Links with PM domain as a master */
-       struct list_head slave_links;   /* Links with PM domain as a slave */
+       struct list_head parent_links;  /* Links with PM domain as a parent */
+       struct list_head child_links;   /* Links with PM domain as a child */
        struct list_head dev_list;      /* List of devices */
        struct dev_power_governor *gov;
        struct work_struct power_off_work;
@@ -151,10 +151,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
 }
 
 struct gpd_link {
-       struct generic_pm_domain *master;
-       struct list_head master_node;
-       struct generic_pm_domain *slave;
-       struct list_head slave_node;
+       struct generic_pm_domain *parent;
+       struct list_head parent_node;
+       struct generic_pm_domain *child;
+       struct list_head child_node;
 
        /* Sub-domain's per-parent domain performance state */
        unsigned int performance_state;
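
After the rename, walking a domain's sub-domains reads naturally. A sketch,
given a struct generic_pm_domain *genpd:

        struct gpd_link *link;

        /* Each link on parent_links has 'genpd' as the parent */
        list_for_each_entry(link, &genpd->parent_links, parent_node)
                pr_info("child domain: %s\n", link->child->name);
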
index d5c4a329321ddcbf16eb4e3953c6d74bb0a6b6a0..ee34c553f6bfc8807d2112359828be8455bc7541 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef __LINUX_OPP_H__
 #define __LINUX_OPP_H__
 
+#include <linux/energy_model.h>
 #include <linux/err.h>
 #include <linux/notifier.h>
 
@@ -373,7 +374,11 @@ struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
 struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
 int of_get_required_opp_performance_state(struct device_node *np, int index);
 int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
-void dev_pm_opp_of_register_em(struct cpumask *cpus);
+int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
+static inline void dev_pm_opp_of_unregister_em(struct device *dev)
+{
+       em_dev_unregister_perf_domain(dev);
+}
 #else
 static inline int dev_pm_opp_of_add_table(struct device *dev)
 {
@@ -413,7 +418,13 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
        return NULL;
 }
 
-static inline void dev_pm_opp_of_register_em(struct cpumask *cpus)
+static inline int dev_pm_opp_of_register_em(struct device *dev,
+                                           struct cpumask *cpus)
+{
+       return -ENOTSUPP;
+}
+
+static inline void dev_pm_opp_of_unregister_em(struct device *dev)
 {
 }
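
Call sites now pass the device explicitly and can act on the result. A sketch
of both flavors (ret, cpu_dev, policy and dev are hypothetical variables):

        /* CPU device, e.g. from a cpufreq driver's ->init() callback */
        ret = dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
        if (ret)
                dev_dbg(cpu_dev, "no Energy Model registered (%d)\n", ret);

        /* Non-CPU device, e.g. a GPU or memory-controller driver */
        ret = dev_pm_opp_of_register_em(dev, NULL);
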
 
index 0a9326f5f42181e8fc0c51ecd659f19589306565..c1ff7fa030abea0fde2eb47d4d5c1d819f6deb47 100644 (file)
@@ -1,9 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Energy Model of CPUs
+ * Energy Model of devices
  *
- * Copyright (c) 2018, Arm ltd.
+ * Copyright (c) 2018-2020, Arm ltd.
  * Written by: Quentin Perret, Arm ltd.
+ * Improvements provided by: Lukasz Luba, Arm ltd.
  */
 
 #define pr_fmt(fmt) "energy_model: " fmt
 #include <linux/sched/topology.h>
 #include <linux/slab.h>
 
-/* Mapping of each CPU to the performance domain to which it belongs. */
-static DEFINE_PER_CPU(struct em_perf_domain *, em_data);
-
 /*
  * Mutex serializing the registrations of performance domains and letting
  * callbacks defined by drivers sleep.
  */
 static DEFINE_MUTEX(em_pd_mutex);
 
+static bool _is_cpu_device(struct device *dev)
+{
+       return (dev->bus == &cpu_subsys);
+}
+
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *rootdir;
 
-static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd)
+static void em_debug_create_ps(struct em_perf_state *ps, struct dentry *pd)
 {
        struct dentry *d;
        char name[24];
 
-       snprintf(name, sizeof(name), "cs:%lu", cs->frequency);
+       snprintf(name, sizeof(name), "ps:%lu", ps->frequency);
 
-       /* Create per-cs directory */
+       /* Create per-ps directory */
        d = debugfs_create_dir(name, pd);
-       debugfs_create_ulong("frequency", 0444, d, &cs->frequency);
-       debugfs_create_ulong("power", 0444, d, &cs->power);
-       debugfs_create_ulong("cost", 0444, d, &cs->cost);
+       debugfs_create_ulong("frequency", 0444, d, &ps->frequency);
+       debugfs_create_ulong("power", 0444, d, &ps->power);
+       debugfs_create_ulong("cost", 0444, d, &ps->cost);
 }
 
 static int em_debug_cpus_show(struct seq_file *s, void *unused)
@@ -49,22 +52,30 @@ static int em_debug_cpus_show(struct seq_file *s, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
 
-static void em_debug_create_pd(struct em_perf_domain *pd, int cpu)
+static void em_debug_create_pd(struct device *dev)
 {
        struct dentry *d;
-       char name[8];
        int i;
 
-       snprintf(name, sizeof(name), "pd%d", cpu);
-
        /* Create the directory of the performance domain */
-       d = debugfs_create_dir(name, rootdir);
+       d = debugfs_create_dir(dev_name(dev), rootdir);
 
-       debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops);
+       if (_is_cpu_device(dev))
+               debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
+                                   &em_debug_cpus_fops);
+
+       /* Create a sub-directory for each performance state */
+       for (i = 0; i < dev->em_pd->nr_perf_states; i++)
+               em_debug_create_ps(&dev->em_pd->table[i], d);
 
-       /* Create a sub-directory for each capacity state */
-       for (i = 0; i < pd->nr_cap_states; i++)
-               em_debug_create_cs(&pd->table[i], d);
+}
+
+static void em_debug_remove_pd(struct device *dev)
+{
+       struct dentry *debug_dir;
+
+       debug_dir = debugfs_lookup(dev_name(dev), rootdir);
+       debugfs_remove_recursive(debug_dir);
 }
 
 static int __init em_debug_init(void)
@@ -76,58 +87,55 @@ static int __init em_debug_init(void)
 }
 core_initcall(em_debug_init);
 #else /* CONFIG_DEBUG_FS */
-static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
+static void em_debug_create_pd(struct device *dev) {}
+static void em_debug_remove_pd(struct device *dev) {}
 #endif
-static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
-                                               struct em_data_callback *cb)
+
+static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
+                               int nr_states, struct em_data_callback *cb)
 {
        unsigned long opp_eff, prev_opp_eff = ULONG_MAX;
        unsigned long power, freq, prev_freq = 0;
-       int i, ret, cpu = cpumask_first(span);
-       struct em_cap_state *table;
-       struct em_perf_domain *pd;
+       struct em_perf_state *table;
+       int i, ret;
        u64 fmax;
 
-       if (!cb->active_power)
-               return NULL;
-
-       pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
-       if (!pd)
-               return NULL;
-
        table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL);
        if (!table)
-               goto free_pd;
+               return -ENOMEM;
 
-       /* Build the list of capacity states for this performance domain */
+       /* Build the list of performance states for this performance domain */
        for (i = 0, freq = 0; i < nr_states; i++, freq++) {
                /*
                 * active_power() is a driver callback which ceils 'freq' to
-                * lowest capacity state of 'cpu' above 'freq' and updates
+                * lowest performance state of 'dev' above 'freq' and updates
                 * 'power' and 'freq' accordingly.
                 */
-               ret = cb->active_power(&power, &freq, cpu);
+               ret = cb->active_power(&power, &freq, dev);
                if (ret) {
-                       pr_err("pd%d: invalid cap. state: %d\n", cpu, ret);
-                       goto free_cs_table;
+                       dev_err(dev, "EM: invalid perf. state: %d\n",
+                               ret);
+                       goto free_ps_table;
                }
 
                /*
                 * We expect the driver callback to increase the frequency for
-                * higher capacity states.
+                * higher performance states.
                 */
                if (freq <= prev_freq) {
-                       pr_err("pd%d: non-increasing freq: %lu\n", cpu, freq);
-                       goto free_cs_table;
+                       dev_err(dev, "EM: non-increasing freq: %lu\n",
+                               freq);
+                       goto free_ps_table;
                }
 
                /*
                 * The power returned by active_power() is expected to be
                 * positive, in milli-watts and to fit into 16 bits.
                 */
-               if (!power || power > EM_CPU_MAX_POWER) {
-                       pr_err("pd%d: invalid power: %lu\n", cpu, power);
-                       goto free_cs_table;
+               if (!power || power > EM_MAX_POWER) {
+                       dev_err(dev, "EM: invalid power: %lu\n",
+                               power);
+                       goto free_ps_table;
                }
 
                table[i].power = power;
@@ -141,12 +149,12 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
                 */
                opp_eff = freq / power;
                if (opp_eff >= prev_opp_eff)
-                       pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_cap_state %d >= em_cap_state%d\n",
-                                       cpu, i, i - 1);
+                       dev_dbg(dev, "EM: hertz/watts ratio non-monotonically decreasing: em_perf_state %d >= em_perf_state%d\n",
+                                       i, i - 1);
                prev_opp_eff = opp_eff;
        }
 
-       /* Compute the cost of each capacity_state. */
+       /* Compute the cost of each performance state. */
        fmax = (u64) table[nr_states - 1].frequency;
        for (i = 0; i < nr_states; i++) {
                table[i].cost = div64_u64(fmax * table[i].power,
@@ -154,39 +162,94 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
        }
 
        pd->table = table;
-       pd->nr_cap_states = nr_states;
-       cpumask_copy(to_cpumask(pd->cpus), span);
-
-       em_debug_create_pd(pd, cpu);
+       pd->nr_perf_states = nr_states;
 
-       return pd;
+       return 0;
 
-free_cs_table:
+free_ps_table:
        kfree(table);
-free_pd:
-       kfree(pd);
+       return -EINVAL;
+}
+
+static int em_create_pd(struct device *dev, int nr_states,
+                       struct em_data_callback *cb, cpumask_t *cpus)
+{
+       struct em_perf_domain *pd;
+       struct device *cpu_dev;
+       int cpu, ret;
+
+       if (_is_cpu_device(dev)) {
+               pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
+               if (!pd)
+                       return -ENOMEM;
+
+               cpumask_copy(em_span_cpus(pd), cpus);
+       } else {
+               pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+               if (!pd)
+                       return -ENOMEM;
+       }
+
+       ret = em_create_perf_table(dev, pd, nr_states, cb);
+       if (ret) {
+               kfree(pd);
+               return ret;
+       }
+
+       if (_is_cpu_device(dev))
+               for_each_cpu(cpu, cpus) {
+                       cpu_dev = get_cpu_device(cpu);
+                       cpu_dev->em_pd = pd;
+               }
+
+       dev->em_pd = pd;
+
+       return 0;
+}
+
+/**
+ * em_pd_get() - Return the performance domain for a device
+ * @dev : Device to find the performance domain for
+ *
+ * Returns the performance domain to which @dev belongs, or NULL if it doesn't
+ * exist.
+ */
+struct em_perf_domain *em_pd_get(struct device *dev)
+{
+       if (IS_ERR_OR_NULL(dev))
+               return NULL;
 
-       return NULL;
+       return dev->em_pd;
 }
+EXPORT_SYMBOL_GPL(em_pd_get);
 
 /**
  * em_cpu_get() - Return the performance domain for a CPU
  * @cpu : CPU to find the performance domain for
  *
- * Return: the performance domain to which 'cpu' belongs, or NULL if it doesn't
+ * Returns the performance domain to which @cpu belongs, or NULL if it doesn't
  * exist.
  */
 struct em_perf_domain *em_cpu_get(int cpu)
 {
-       return READ_ONCE(per_cpu(em_data, cpu));
+       struct device *cpu_dev;
+
+       cpu_dev = get_cpu_device(cpu);
+       if (!cpu_dev)
+               return NULL;
+
+       return em_pd_get(cpu_dev);
 }
 EXPORT_SYMBOL_GPL(em_cpu_get);
 
 /**
- * em_register_perf_domain() - Register the Energy Model of a performance domain
- * @span       : Mask of CPUs in the performance domain
- * @nr_states  : Number of capacity states to register
+ * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device
+ * @dev                : Device for which the EM is to be registered
+ * @nr_states  : Number of performance states to register
  * @cb         : Callback functions providing the data of the Energy Model
+ * @cpus       : Pointer to cpumask_t, which for a CPU device is mandatory;
+ *             it can be taken from e.g. 'policy->cpus'. For other types of
+ *             devices it should be set to NULL.
  *
  * Create Energy Model tables for a performance domain using the callbacks
  * defined in cb.
@@ -196,14 +259,13 @@ EXPORT_SYMBOL_GPL(em_cpu_get);
  *
  * Return 0 on success
  */
-int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
-                                               struct em_data_callback *cb)
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+                               struct em_data_callback *cb, cpumask_t *cpus)
 {
        unsigned long cap, prev_cap = 0;
-       struct em_perf_domain *pd;
-       int cpu, ret = 0;
+       int cpu, ret;
 
-       if (!span || !nr_states || !cb)
+       if (!dev || !nr_states || !cb)
                return -EINVAL;
 
        /*
@@ -212,47 +274,79 @@ int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
         */
        mutex_lock(&em_pd_mutex);
 
-       for_each_cpu(cpu, span) {
-               /* Make sure we don't register again an existing domain. */
-               if (READ_ONCE(per_cpu(em_data, cpu))) {
-                       ret = -EEXIST;
-                       goto unlock;
-               }
+       if (dev->em_pd) {
+               ret = -EEXIST;
+               goto unlock;
+       }
 
-               /*
-                * All CPUs of a domain must have the same micro-architecture
-                * since they all share the same table.
-                */
-               cap = arch_scale_cpu_capacity(cpu);
-               if (prev_cap && prev_cap != cap) {
-                       pr_err("CPUs of %*pbl must have the same capacity\n",
-                                                       cpumask_pr_args(span));
+       if (_is_cpu_device(dev)) {
+               if (!cpus) {
+                       dev_err(dev, "EM: invalid CPU mask\n");
                        ret = -EINVAL;
                        goto unlock;
                }
-               prev_cap = cap;
+
+               for_each_cpu(cpu, cpus) {
+                       if (em_cpu_get(cpu)) {
+                               dev_err(dev, "EM: exists for CPU%d\n", cpu);
+                               ret = -EEXIST;
+                               goto unlock;
+                       }
+                       /*
+                        * All CPUs of a domain must have the same
+                        * micro-architecture since they all share the same
+                        * table.
+                        */
+                       cap = arch_scale_cpu_capacity(cpu);
+                       if (prev_cap && prev_cap != cap) {
+                               dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n",
+                                       cpumask_pr_args(cpus));
+
+                               ret = -EINVAL;
+                               goto unlock;
+                       }
+                       prev_cap = cap;
+               }
        }
 
-       /* Create the performance domain and add it to the Energy Model. */
-       pd = em_create_pd(span, nr_states, cb);
-       if (!pd) {
-               ret = -EINVAL;
+       ret = em_create_pd(dev, nr_states, cb, cpus);
+       if (ret)
                goto unlock;
-       }
 
-       for_each_cpu(cpu, span) {
-               /*
-                * The per-cpu array can be read concurrently from em_cpu_get().
-                * The barrier enforces the ordering needed to make sure readers
-                * can only access well formed em_perf_domain structs.
-                */
-               smp_store_release(per_cpu_ptr(&em_data, cpu), pd);
-       }
+       em_debug_create_pd(dev);
+       dev_info(dev, "EM: created perf domain\n");
 
-       pr_debug("Created perf domain %*pbl\n", cpumask_pr_args(span));
 unlock:
        mutex_unlock(&em_pd_mutex);
-
        return ret;
 }
-EXPORT_SYMBOL_GPL(em_register_perf_domain);
+EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
+
+/**
+ * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
+ * @dev                : Device for which the EM is registered
+ *
+ * Unregister the EM for the specified @dev (but not a CPU device).
+ */
+void em_dev_unregister_perf_domain(struct device *dev)
+{
+       if (IS_ERR_OR_NULL(dev) || !dev->em_pd)
+               return;
+
+       if (_is_cpu_device(dev))
+               return;
+
+       /*
+        * The mutex separates all register/unregister requests and protects
+        * from potential clean-up/setup issues in the debugfs directories.
+        * The debugfs directory name is the same as device's name.
+        */
+       mutex_lock(&em_pd_mutex);
+       em_debug_remove_pd(dev);
+
+       kfree(dev->em_pd->table);
+       kfree(dev->em_pd);
+       dev->em_pd = NULL;
+       mutex_unlock(&em_pd_mutex);
+}
+EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);
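
With the per-CPU em_data array gone, the performance domain now hangs off struct device (dev->em_pd), so non-CPU devices can register an EM as well. A sketch of the non-CPU path; est_power() and NR_PERF_STATES are hypothetical placeholders, and EM_DATA_CB() is the existing initializer for struct em_data_callback:

	static int est_power(unsigned long *power, unsigned long *freq,
			     struct device *dev)
	{
		/*
		 * Ceil *freq (kHz) to the device's next OPP and report that
		 * OPP's power in mW. Each successive call must return a
		 * strictly higher frequency, or em_create_perf_table()
		 * rejects the table.
		 */
		return 0;
	}

	static struct em_data_callback em_cb = EM_DATA_CB(est_power);

	/* In probe: the cpumask is NULL because this is not a CPU device. */
	ret = em_dev_register_perf_domain(dev, NR_PERF_STATES, &em_cb, NULL);

	/* In remove: only valid for non-CPU devices, as documented above. */
	em_dev_unregister_perf_domain(dev);
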
index 02ec716a492713877caa67ff51b2d83a5d495b3e..5714f51ba9f8090e1033736164fd36f6b18625c8 100644 (file)
@@ -1062,7 +1062,7 @@ power_attr(disk);
 static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
                           char *buf)
 {
-       return sprintf(buf,"%d:%d\n", MAJOR(swsusp_resume_device),
+       return sprintf(buf, "%d:%d\n", MAJOR(swsusp_resume_device),
                       MINOR(swsusp_resume_device));
 }
 
@@ -1162,7 +1162,7 @@ static ssize_t reserved_size_store(struct kobject *kobj,
 
 power_attr(reserved_size);
 
-static struct attribute * g[] = {
+static struct attribute *g[] = {
        &disk_attr.attr,
        &resume_offset_attr.attr,
        &resume_attr.attr,
@@ -1190,7 +1190,7 @@ static int __init resume_setup(char *str)
        if (noresume)
                return 1;
 
-       strncpy( resume_file, str, 255 );
+       strncpy(resume_file, str, 255);
        return 1;
 }
 
index ba2094db62949446f5fca3e4f46edbc559f1cce3..32fc89ac96c30fb7bbbdfa692b4c59fec76f9929 100644 (file)
@@ -32,7 +32,7 @@ static inline int init_header_complete(struct swsusp_info *info)
        return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
 }
 
-static inline char *check_image_kernel(struct swsusp_info *info)
+static inline const char *check_image_kernel(struct swsusp_info *info)
 {
        return arch_hibernation_header_restore(info) ?
                        "architecture specific data" : NULL;
index 881128b9351e00d092d8ec2db1b03d39666a2896..cef154261fe2f9c64b1775b5a09a78e0b9d73f6d 100644 (file)
@@ -2023,7 +2023,7 @@ static int init_header_complete(struct swsusp_info *info)
        return 0;
 }
 
-static char *check_image_kernel(struct swsusp_info *info)
+static const char *check_image_kernel(struct swsusp_info *info)
 {
        if (info->version_code != LINUX_VERSION_CODE)
                return "kernel version";
@@ -2176,7 +2176,7 @@ static void mark_unsafe_pages(struct memory_bitmap *bm)
 
 static int check_header(struct swsusp_info *info)
 {
-       char *reason;
+       const char *reason;
 
        reason = check_image_kernel(info);
        if (!reason && info->num_physpages != get_num_physpages())
index 7fbaee24c824f032ea34428ca0d50d3e49d51c0e..402a09af9f4305a7515467c8bc3c11bd19cdb7d3 100644 (file)
@@ -909,11 +909,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 }
 #endif
 
-static int __init sugov_register(void)
-{
-       return cpufreq_register_governor(&schedutil_gov);
-}
-core_initcall(sugov_register);
+cpufreq_governor_init(schedutil_gov);
 
 #ifdef CONFIG_ENERGY_MODEL
 extern bool sched_energy_update;
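
The open-coded initcall is replaced by cpufreq_governor_init(), which registers the governor at core_initcall time. The helper is expected to expand to roughly the following (a paraphrase of the macro added to include/linux/cpufreq.h in this series, not a verbatim quote):

	#define cpufreq_governor_init(__governor)			\
	static int __init __governor##_init(void)			\
	{								\
		return cpufreq_register_governor(&__governor);		\
	}								\
	core_initcall(__governor##_init)
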
index 04fa8dbcfa4d78a33fa557619cd1fd8363b988de..14c80bb862800daa4c93bfe8d764a504896757b1 100644 (file)
@@ -6501,7 +6501,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
                max_util = max(max_util, cpu_util);
        }
 
-       return em_pd_energy(pd->em_pd, max_util, sum_util);
+       return em_cpu_energy(pd->em_pd, max_util, sum_util);
 }
 
 /*
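
em_pd_energy() becomes em_cpu_energy() as part of the capacity-to-performance terminology change; the estimate itself is unchanged. Paraphrasing the inline helper in include/linux/energy_model.h: it picks the lowest performance state ps whose frequency covers max_util scaled to the domain's maximum frequency, then returns approximately

	ps->cost * sum_util / scale_cpu

where ps->cost = fmax * ps->power / ps->frequency, as computed in em_create_perf_table() above.
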
index ba81187bb7af10ce628956f5f822e28d8ce952fd..2f91d3126365ebc97b650159bf3925bedf2c3d0b 100644 (file)
@@ -272,10 +272,10 @@ static void perf_domain_debug(const struct cpumask *cpu_map,
        printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
 
        while (pd) {
-               printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }",
+               printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
                                cpumask_first(perf_domain_span(pd)),
                                cpumask_pr_args(perf_domain_span(pd)),
-                               em_pd_nr_cap_states(pd->em_pd));
+                               em_pd_nr_perf_states(pd->em_pd));
                pd = pd->next;
        }
 
@@ -313,26 +313,26 @@ static void sched_energy_set(bool has_eas)
  *
  * The complexity of the Energy Model is defined as:
  *
- *              C = nr_pd * (nr_cpus + nr_cs)
+ *              C = nr_pd * (nr_cpus + nr_ps)
  *
  * with parameters defined as:
  *  - nr_pd:    the number of performance domains
  *  - nr_cpus:  the number of CPUs
- *  - nr_cs:    the sum of the number of capacity states of all performance
+ *  - nr_ps:    the sum of the number of performance states of all performance
  *              domains (for example, on a system with 2 performance domains,
- *              with 10 capacity states each, nr_cs = 2 * 10 = 20).
+ *              with 10 performance states each, nr_ps = 2 * 10 = 20).
  *
  * It is generally not a good idea to use such a model in the wake-up path on
  * very complex platforms because of the associated scheduling overheads. The
  * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
- * with per-CPU DVFS and less than 8 capacity states each, for example.
+ * with per-CPU DVFS and less than 8 performance states each, for example.
  */
 #define EM_MAX_COMPLEXITY 2048
 
 extern struct cpufreq_governor schedutil_gov;
 static bool build_perf_domains(const struct cpumask *cpu_map)
 {
-       int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
+       int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
        struct perf_domain *pd = NULL, *tmp;
        int cpu = cpumask_first(cpu_map);
        struct root_domain *rd = cpu_rq(cpu)->rd;
@@ -384,15 +384,15 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
                pd = tmp;
 
                /*
-                * Count performance domains and capacity states for the
+                * Count performance domains and performance states for the
                 * complexity check.
                 */
                nr_pd++;
-               nr_cs += em_pd_nr_cap_states(pd->em_pd);
+               nr_ps += em_pd_nr_perf_states(pd->em_pd);
        }
 
        /* Bail out if the Energy Model complexity is too high. */
-       if (nr_pd * (nr_cs + nr_cpus) > EM_MAX_COMPLEXITY) {
+       if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) {
                WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
                                                cpumask_pr_args(cpu_map));
                goto free;
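
A quick check of the bound against the example in the comment above: 16 CPUs with per-CPU DVFS gives nr_pd = 16 and nr_cpus = 16. At 7 performance states per domain, nr_ps = 16 * 7 = 112, so C = 16 * (112 + 16) = 2048, which passes the strict greater-than test; at 8 states per domain, C = 16 * (128 + 16) = 2304 and EAS stays disabled.
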
index 6ca93bd2949e802233be016589da0aeff8393deb..39cd1abd8559078efc8874422f3f2f5c7801f34c 100644 (file)
@@ -49,17 +49,17 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
         else:
             status_string = 'off-{}'.format(genpd['state_idx'])
 
-        slave_names = []
+        child_names = []
         for link in list_for_each_entry(
-                genpd['master_links'],
+                genpd['parent_links'],
                 device_link_type.get_type().pointer(),
-                'master_node'):
-            slave_names.apend(link['slave']['name'])
+                'parent_node'):
+            child_names.append(link['child']['name'])
 
         gdb.write('%-30s  %-15s %s\n' % (
                 genpd['name'].string(),
                 status_string,
-                ', '.join(slave_names)))
+                ', '.join(child_names)))
 
         # Print devices in domain
         for pm_data in list_for_each_entry(genpd['dev_list'],
@@ -70,7 +70,7 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
             gdb.write('    %-50s  %s\n' % (kobj_path, rtpm_status_str(dev)))
 
     def invoke(self, arg, from_tty):
-        gdb.write('domain                          status          slaves\n');
+        gdb.write('domain                          status          children\n');
         gdb.write('    /device                                             runtime status\n');
         gdb.write('----------------------------------------------------------------------\n');
         for genpd in list_for_each_entry(