]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blob - drivers/cpuidle/cpuidle-powernv.c
Merge branch 'cleanups-for-4.1-v2' of git://git.kernel.org/pub/scm/linux/kernel/git...
[mirror_ubuntu-focal-kernel.git] / drivers / cpuidle / cpuidle-powernv.c
1 /*
2 * cpuidle-powernv - idle state cpuidle driver.
3 * Adapted from drivers/cpuidle/cpuidle-pseries
4 *
5 */
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/init.h>
10 #include <linux/moduleparam.h>
11 #include <linux/cpuidle.h>
12 #include <linux/cpu.h>
13 #include <linux/notifier.h>
14 #include <linux/clockchips.h>
15 #include <linux/of.h>
16 #include <linux/slab.h>
17
18 #include <asm/machdep.h>
19 #include <asm/firmware.h>
20 #include <asm/opal.h>
21 #include <asm/runlatch.h>
22
/* Capacity of powernv_states[]; slot 0 is always the static snooze state. */
#define MAX_POWERNV_IDLE_STATES	8

struct cpuidle_driver powernv_idle_driver = {
	.name = "powernv_idle",
	.owner = THIS_MODULE,
};

/* Number of valid entries in cpuidle_state_table (set by probe) */
static int max_idle_state;
/* Points at powernv_states[] once powernv_idle_probe() succeeds */
static struct cpuidle_state *cpuidle_state_table;
32
/*
 * snooze_loop - shallowest idle state: busy-wait at reduced SMT priority.
 *
 * Spins until need_resched() becomes true.  TIF_POLLING_NRFLAG advertises
 * that this CPU is polling need_resched(), so remote wakers can skip the
 * wakeup IPI.  Returns @index so cpuidle accounts the time to this state.
 */
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	ppc64_runlatch_off();
	while (!need_resched()) {
		/* Drop SMT thread priority so sibling threads get the cycles */
		HMT_low();
		HMT_very_low();
	}

	/* Restore normal thread priority before returning to the scheduler */
	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	/* Make the polling-flag clear visible before we stop spinning */
	smp_mb();
	return index;
}
52
/*
 * nap_loop - enter the hardware "nap" power-saving state.
 *
 * Thin wrapper around power7_idle(); the runlatch is dropped around the
 * nap so the core is reported idle to the hardware.
 */
static int nap_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	ppc64_runlatch_off();
	power7_idle();
	ppc64_runlatch_on();
	return index;
}
62
/*
 * fastsleep_loop - enter the deeper "fast sleep" state.
 *
 * Saves LPCR, clears the decrementer power-save exit cause (PECE1) for the
 * duration of the sleep, and restores LPCR on wakeup.  Bails out before
 * SYSTEM_RUNNING — presumably the timer-offload machinery this state
 * relies on isn't fully set up during early boot (see comment below);
 * TODO(review): confirm against the CPUIDLE_FLAG_TIMER_STOP setup path.
 */
static int fastsleep_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long old_lpcr = mfspr(SPRN_LPCR);
	unsigned long new_lpcr;

	if (unlikely(system_state < SYSTEM_RUNNING))
		return index;

	new_lpcr = old_lpcr;
	/* Do not exit powersave upon decrementer as we've setup the timer
	 * offload.
	 */
	new_lpcr &= ~LPCR_PECE1;

	mtspr(SPRN_LPCR, new_lpcr);
	power7_sleep();

	/* Restore the original wakeup causes */
	mtspr(SPRN_LPCR, old_lpcr);

	return index;
}
86
87 /*
88 * States for dedicated partition case.
89 */
/*
 * States for dedicated partition case.
 *
 * Only snooze is statically defined; further entries are filled in from
 * the device tree by powernv_add_idle_states().
 */
static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
};
98
99 static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
100 unsigned long action, void *hcpu)
101 {
102 int hotcpu = (unsigned long)hcpu;
103 struct cpuidle_device *dev =
104 per_cpu(cpuidle_devices, hotcpu);
105
106 if (dev && cpuidle_get_driver()) {
107 switch (action) {
108 case CPU_ONLINE:
109 case CPU_ONLINE_FROZEN:
110 cpuidle_pause_and_lock();
111 cpuidle_enable_device(dev);
112 cpuidle_resume_and_unlock();
113 break;
114
115 case CPU_DEAD:
116 case CPU_DEAD_FROZEN:
117 cpuidle_pause_and_lock();
118 cpuidle_disable_device(dev);
119 cpuidle_resume_and_unlock();
120 break;
121
122 default:
123 return NOTIFY_DONE;
124 }
125 }
126 return NOTIFY_OK;
127 }
128
/* Hotplug notifier registered at init to track CPU online/dead events */
static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = powernv_cpuidle_add_cpu_notifier,
};
132
133 /*
134 * powernv_cpuidle_driver_init()
135 */
136 static int powernv_cpuidle_driver_init(void)
137 {
138 int idle_state;
139 struct cpuidle_driver *drv = &powernv_idle_driver;
140
141 drv->state_count = 0;
142
143 for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
144 /* Is the state not enabled? */
145 if (cpuidle_state_table[idle_state].enter == NULL)
146 continue;
147
148 drv->states[drv->state_count] = /* structure copy */
149 cpuidle_state_table[idle_state];
150
151 drv->state_count += 1;
152 }
153
154 return 0;
155 }
156
157 static int powernv_add_idle_states(void)
158 {
159 struct device_node *power_mgt;
160 int nr_idle_states = 1; /* Snooze */
161 int dt_idle_states;
162 u32 *latency_ns, *residency_ns, *flags;
163 int i, rc;
164
165 /* Currently we have snooze statically defined */
166
167 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
168 if (!power_mgt) {
169 pr_warn("opal: PowerMgmt Node not found\n");
170 goto out;
171 }
172
173 /* Read values of any property to determine the num of idle states */
174 dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
175 if (dt_idle_states < 0) {
176 pr_warn("cpuidle-powernv: no idle states found in the DT\n");
177 goto out;
178 }
179
180 flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
181 if (of_property_read_u32_array(power_mgt,
182 "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
183 pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
184 goto out_free_flags;
185 }
186
187 latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
188 rc = of_property_read_u32_array(power_mgt,
189 "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
190 if (rc) {
191 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
192 goto out_free_latency;
193 }
194
195 residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
196 rc = of_property_read_u32_array(power_mgt,
197 "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
198
199 for (i = 0; i < dt_idle_states; i++) {
200
201 /*
202 * Cpuidle accepts exit_latency and target_residency in us.
203 * Use default target_residency values if f/w does not expose it.
204 */
205 if (flags[i] & OPAL_PM_NAP_ENABLED) {
206 /* Add NAP state */
207 strcpy(powernv_states[nr_idle_states].name, "Nap");
208 strcpy(powernv_states[nr_idle_states].desc, "Nap");
209 powernv_states[nr_idle_states].flags = 0;
210 powernv_states[nr_idle_states].target_residency = 100;
211 powernv_states[nr_idle_states].enter = &nap_loop;
212 } else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
213 flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
214 /* Add FASTSLEEP state */
215 strcpy(powernv_states[nr_idle_states].name, "FastSleep");
216 strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
217 powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
218 powernv_states[nr_idle_states].target_residency = 300000;
219 powernv_states[nr_idle_states].enter = &fastsleep_loop;
220 }
221
222 powernv_states[nr_idle_states].exit_latency =
223 ((unsigned int)latency_ns[i]) / 1000;
224
225 if (!rc) {
226 powernv_states[nr_idle_states].target_residency =
227 ((unsigned int)residency_ns[i]) / 1000;
228 }
229
230 nr_idle_states++;
231 }
232
233 kfree(residency_ns);
234 out_free_latency:
235 kfree(latency_ns);
236 out_free_flags:
237 kfree(flags);
238 out:
239 return nr_idle_states;
240 }
241
242 /*
243 * powernv_idle_probe()
244 * Choose state table for shared versus dedicated partition
245 */
246 static int powernv_idle_probe(void)
247 {
248 if (cpuidle_disable != IDLE_NO_OVERRIDE)
249 return -ENODEV;
250
251 if (firmware_has_feature(FW_FEATURE_OPALv3)) {
252 cpuidle_state_table = powernv_states;
253 /* Device tree can indicate more idle states */
254 max_idle_state = powernv_add_idle_states();
255 } else
256 return -ENODEV;
257
258 return 0;
259 }
260
261 static int __init powernv_processor_idle_init(void)
262 {
263 int retval;
264
265 retval = powernv_idle_probe();
266 if (retval)
267 return retval;
268
269 powernv_cpuidle_driver_init();
270 retval = cpuidle_register(&powernv_idle_driver, NULL);
271 if (retval) {
272 printk(KERN_DEBUG "Registration of powernv driver failed.\n");
273 return retval;
274 }
275
276 register_cpu_notifier(&setup_hotplug_notifier);
277 printk(KERN_DEBUG "powernv_idle_driver registered\n");
278 return 0;
279 }
280
281 device_initcall(powernv_processor_idle_init);