/*
 * cpuidle-powernv - idle state cpuidle driver.
 * Adapted from drivers/cpuidle/cpuidle-pseries
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/clockchips.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
#include <asm/runlatch.h>

#define MAX_POWERNV_IDLE_STATES	8

struct cpuidle_driver powernv_idle_driver = {
	.name  = "powernv_idle",
	.owner = THIS_MODULE,
};

static int max_idle_state;
static struct cpuidle_state *cpuidle_state_table;
static u64 snooze_timeout;
static bool snooze_timeout_en;

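/*
 * Polling "snooze" loop. When deeper idle states are available,
 * powernv_idle_probe() arms snooze_timeout with the target residency of
 * the next state (converted to timebase ticks), so this loop breaks out
 * after that interval and gives the cpuidle governor a chance to promote
 * the CPU into a deeper idle state instead of polling indefinitely.
 */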
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	u64 snooze_exit_time;

	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	snooze_exit_time = get_tb() + snooze_timeout;
	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
		if (snooze_timeout_en && get_tb() > snooze_exit_time)
			break;
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}

static int nap_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	ppc64_runlatch_off();
	power7_idle();
	ppc64_runlatch_on();
	return index;
}

static int fastsleep_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long old_lpcr = mfspr(SPRN_LPCR);
	unsigned long new_lpcr;

	if (unlikely(system_state < SYSTEM_RUNNING))
		return index;

	new_lpcr = old_lpcr;
	/* Do not exit powersave upon decrementer as we've setup the timer
	 * offload.
	 */
	new_lpcr &= ~LPCR_PECE1;

	mtspr(SPRN_LPCR, new_lpcr);
	power7_sleep();

	mtspr(SPRN_LPCR, old_lpcr);

	return index;
}

/*
 * States for dedicated partition case.
 *
 * Only snooze is defined statically; Nap and FastSleep entries are
 * appended at boot from the device tree by powernv_add_idle_states().
 */
static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
};

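/*
 * CPU hotplug notifier: enable the per-CPU cpuidle device when a CPU
 * comes online and disable it when the CPU goes offline.
 */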
static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
			unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev =
				per_cpu(cpuidle_devices, hotcpu);

	if (dev && cpuidle_get_driver()) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_enable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_disable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		default:
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = powernv_cpuidle_add_cpu_notifier,
};

/*
 * powernv_cpuidle_driver_init()
 */
static int powernv_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &powernv_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
		/* Is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	return 0;
}

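/*
 * Parse the OPAL idle-state properties under /ibm,opal/power-mgt and
 * append Nap/FastSleep entries to powernv_states[] after the statically
 * defined snooze state. Returns the total number of idle states found.
 */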
static int powernv_add_idle_states(void)
{
	struct device_node *power_mgt;
	int nr_idle_states = 1; /* Snooze */
	int dt_idle_states;
	u32 *latency_ns, *residency_ns, *flags;
	int i, rc;

	/* Currently we have snooze statically defined */

	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!power_mgt) {
		pr_warn("opal: PowerMgmt Node not found\n");
		goto out;
	}

	/* Read values of any property to determine the num of idle states */
	dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
	if (dt_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		goto out;
	}

	flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
	if (of_property_read_u32_array(power_mgt,
			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		goto out_free_flags;
	}

	latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
	rc = of_property_read_u32_array(power_mgt,
		"ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
	if (rc) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
		goto out_free_latency;
	}

	residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
	rc = of_property_read_u32_array(power_mgt,
		"ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);

	for (i = 0; i < dt_idle_states; i++) {

		/*
		 * Cpuidle accepts exit_latency and target_residency in us.
		 * Use default target_residency values if f/w does not expose it.
		 */
		if (flags[i] & OPAL_PM_NAP_ENABLED) {
			/* Add NAP state */
			strcpy(powernv_states[nr_idle_states].name, "Nap");
			strcpy(powernv_states[nr_idle_states].desc, "Nap");
			powernv_states[nr_idle_states].flags = 0;
			powernv_states[nr_idle_states].target_residency = 100;
			powernv_states[nr_idle_states].enter = &nap_loop;
		} else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
			flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
			/* Add FASTSLEEP state */
			strcpy(powernv_states[nr_idle_states].name, "FastSleep");
			strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
			powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
			powernv_states[nr_idle_states].target_residency = 300000;
			powernv_states[nr_idle_states].enter = &fastsleep_loop;
		}

		powernv_states[nr_idle_states].exit_latency =
				((unsigned int)latency_ns[i]) / 1000;

		if (!rc) {
			powernv_states[nr_idle_states].target_residency =
				((unsigned int)residency_ns[i]) / 1000;
		}

		nr_idle_states++;
	}

	kfree(residency_ns);
out_free_latency:
	kfree(latency_ns);
out_free_flags:
	kfree(flags);
out:
	return nr_idle_states;
}

/*
 * powernv_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int powernv_idle_probe(void)
{
	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_OPALv3)) {
		cpuidle_state_table = powernv_states;
		/* Device tree can indicate more idle states */
		max_idle_state = powernv_add_idle_states();
		if (max_idle_state > 1) {
			snooze_timeout_en = true;
			snooze_timeout = powernv_states[1].target_residency *
					 tb_ticks_per_usec;
		}
	} else
		return -ENODEV;

	return 0;
}

static int __init powernv_processor_idle_init(void)
{
	int retval;

	retval = powernv_idle_probe();
	if (retval)
		return retval;

	powernv_cpuidle_driver_init();
	retval = cpuidle_register(&powernv_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of powernv driver failed.\n");
		return retval;
	}

	register_cpu_notifier(&setup_hotplug_notifier);
	printk(KERN_DEBUG "powernv_idle_driver registered\n");
	return 0;
}

device_initcall(powernv_processor_idle_init);