/*
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>

#include "powernv.h"
#include "subcore.h"
/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE	0xF

static u32 supported_cpuidle_states;
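/*
 * supported_cpuidle_states is filled in by pnv_probe_idle_states() below:
 * it is the bitwise OR of the OPAL_PM_* flags read from the
 * "ibm,cpu-idle-state-flags" device-tree property.
 */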
static int pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these reg values of current cpu and use the
	 * same across all cpus.
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hid1_val = mfspr(SPRN_HID1);
	uint64_t hid4_val = mfspr(SPRN_HID4);
	uint64_t hid5_val = mfspr(SPRN_HID5);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);
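	/*
	 * Hand these values to the OPAL sleep-winkle (SLW) engine for every
	 * CPU, so that firmware can restore the SPRs when a thread wakes up
	 * from a state that loses full hypervisor context.
	 */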
	for_each_possible_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)&paca[cpu];

		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
			/*
			 * HSPRG0 is used to store the cpu's pointer to paca.
			 * Hence last 3 bits are guaranteed to be 0. Program
			 * slw to restore HSPRG0 with 63rd bit set, so that
			 * when a thread wakes up at 0x100 we can use this bit
			 * to distinguish between fastsleep and deep winkle.
			 * This is not necessary with stop/psscr since PLS
			 * field of psscr indicates which state we are waking
			 * up from.
			 */
			hsprg0_val |= 1;
		}

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {
			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}
static void pnv_alloc_idle_core_states(void)
{
	int i, j;
	int nr_cores = cpu_nr_cores();
	u32 *core_idle_state;
	/*
	 * core_idle_state - First 8 bits track the idle state of each thread
	 * of the core. The 8th bit is the lock bit. Initially all thread bits
	 * are set. They are cleared when the thread enters deep idle state
	 * like sleep and winkle. Initially the lock bit is cleared.
	 * The lock bit has 2 purposes
	 * a. While the first thread is restoring core state, it prevents
	 *    other threads in the core from switching to process context.
	 * b. While the last thread in the core is saving the core state, it
	 *    prevents a different thread from waking up.
	 */
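	/*
	 * As a rough sketch, assuming the usual 8 threads per core: the low
	 * 8 bits (PNV_CORE_IDLE_THREAD_BITS) read 0xff when every thread is
	 * running and 0x00 when all threads are in a deep state, while
	 * PNV_CORE_IDLE_LOCK_BIT is a separate bit in the same word that the
	 * low-level idle entry/exit code takes for the two purposes above.
	 */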
	for (i = 0; i < nr_cores; i++) {
		int first_cpu = i * threads_per_core;
		int node = cpu_to_node(first_cpu);
		size_t paca_ptr_array_size;

		core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
		*core_idle_state = PNV_CORE_IDLE_THREAD_BITS;
		paca_ptr_array_size = (threads_per_core *
				       sizeof(struct paca_struct *));

		for (j = 0; j < threads_per_core; j++) {
			int cpu = first_cpu + j;

			paca[cpu].core_idle_state_ptr = core_idle_state;
			paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
			paca[cpu].thread_mask = 1 << j;
			if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
				continue;
			paca[cpu].thread_sibling_pacas =
				kmalloc_node(paca_ptr_array_size,
					     GFP_KERNEL, node);
		}
	}

	update_subcore_sibling_mask();

	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
		pnv_save_sprs_for_deep_states();
}
u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
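/*
 * The export above is there mainly for the cpuidle-powernv driver, which
 * uses the same OPAL_PM_* flag bits to decide which idle states it can
 * register.
 */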
static void pnv_fastsleep_workaround_apply(void *info)
{
	int rc;
	int *err = info;

	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
					OPAL_CONFIG_IDLE_APPLY);
	if (rc)
		*err = 1;
}
/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;
static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}
static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	cpumask_t primary_thread_mask;
	int err;
	u8 val;

	if (kstrtou8(buf, 0, &val) || val != 1)
		return -EINVAL;

	if (fastsleep_workaround_applyonce == 1)
		return count;

	/*
	 * fastsleep_workaround_applyonce = 1 implies
	 * fastsleep workaround needs to be left in 'applied' state on all
	 * the cores. Do this by-
	 * 1. Patching out the call to 'undo' workaround in fastsleep exit path
	 * 2. Sending ipi to all the cores which have at least one online thread
	 * 3. Patching out the call to 'apply' workaround in fastsleep entry
	 *    path
	 * There is no need to send ipi to cores which have all threads
	 * offlined, as last thread of the core entering fastsleep or deeper
	 * state would have applied workaround.
	 */
	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_exit,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_exit");
		goto fail;
	}

	get_online_cpus();
	primary_thread_mask = cpu_online_cores_map();
	on_each_cpu_mask(&primary_thread_mask,
			 pnv_fastsleep_workaround_apply,
			 &err, 1);
	put_online_cpus();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
		goto fail;
	}

	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_entry,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_entry");
		goto fail;
	}

	fastsleep_workaround_applyonce = 1;

	return count;
fail:
	return -EIO;
}
static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
			show_fastsleep_workaround_applyonce,
			store_fastsleep_workaround_applyonce);
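/*
 * With the 0600 mode above the attribute is root-writable only. On a
 * typical PowerNV system it should show up as
 * /sys/devices/system/cpu/fastsleep_workaround_applyonce; writing "1"
 * switches to the apply-once behaviour, any other value is rejected
 * with -EINVAL, and the change cannot be undone without a reboot.
 */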
/*
 * The default stop state that will be used by ppc_md.power_save
 * function on platforms that support stop instruction.
 */
static u64 pnv_default_stop_val;
static u64 pnv_default_stop_mask;
static bool default_stop_found;
/*
 * Used for ppc_md.power_save which needs a function with no parameters
 */
static void power9_idle(void)
{
	power9_idle_stop(pnv_default_stop_val, pnv_default_stop_mask);
}
/*
 * First deep stop state. Used to figure out when to save/restore
 * hypervisor context.
 */
u64 pnv_first_deep_stop_state = MAX_STOP_STATE;

/*
 * psscr value and mask of the deepest stop idle state.
 * Used when a cpu is offlined.
 */
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
static bool deepest_stop_found;
/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;
	u32 idle_states = pnv_get_supported_cpuidle_states();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		srr1 = power9_idle_stop(pnv_deepest_stop_psscr_val,
					pnv_deepest_stop_psscr_mask);
	} else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
		srr1 = power7_winkle();
	} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
		   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		srr1 = power7_sleep();
	} else if (idle_states & OPAL_PM_NAP_ENABLED) {
		srr1 = power7_nap(1);
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	return srr1;
}
/*
 * Power ISA 3.0 idle initialization.
 *
 * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
 * Register (PSSCR) to control idle behavior.
 *
 * PSSCR layout:
 * ----------------------------------------------------------
 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
 * ----------------------------------------------------------
 * 0      4     41   42    43   44     48    54   56    60
 *
 * PSSCR key fields:
 *	Bits 0:3  - Power-Saving Level Status (PLS). This field indicates the
 *	lowest power-saving state the thread entered since stop instruction was
 *	last executed.
 *
 *	Bit 41 - Status Disable(SD)
 *	0 - Shows PLS entries
 *	1 - PLS entries are all 0
 *
 *	Bit 42 - Enable State Loss
 *	0 - No state is lost irrespective of other fields
 *	1 - Allows state loss
 *
 *	Bit 43 - Exit Criterion
 *	0 - Exit from power-save mode on any interrupt
 *	1 - Exit from power-save mode controlled by LPCR's PECE bits
 *
 *	Bits 44:47 - Power-Saving Level Limit
 *	This limits the power-saving level that can be entered into.
 *
 *	Bits 60:63 - Requested Level
 *	Used to specify which power-saving level must be entered on executing
 *	stop instruction
 */
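/*
 * As an illustration (in terms of the PSSCR field macros in asm/reg.h),
 * a request for stop level 3 with state loss permitted would look
 * roughly like (PSSCR_EC | PSSCR_ESL | 3), i.e. EC = ESL = 1 and
 * RL = 3; the RL field is what pnv_power9_idle_init() below extracts
 * with PSSCR_RL_MASK.
 */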
int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
	int err = 0;

	/*
	 * psscr_mask == 0xf indicates an older firmware.
	 * Set remaining fields of psscr to the default values.
	 * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
	 */
	if (*psscr_mask == 0xf) {
		*psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
		*psscr_mask = PSSCR_HV_DEFAULT_MASK;
		return err;
	}

	/*
	 * New firmware is expected to set the psscr_val bits correctly.
	 * Validate that the following invariants are correctly maintained by
	 * the new firmware:
	 * - ESL bit value matches the EC bit value.
	 * - ESL bit is set for all the deep stop states.
	 */
	if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
		err = ERR_EC_ESL_MISMATCH;
	} else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		   GET_PSSCR_ESL(*psscr_val) == 0) {
		err = ERR_DEEP_STATE_ESL_MISMATCH;
	}

	return err;
}
/**
 * pnv_power9_idle_init: Initializes the default idle state, first
 *                       deep idle state and deepest idle state on
 *                       POWER9 CPUs.
 *
 * @np: /ibm,opal/power-mgt device node
 * @flags: cpu-idle-state-flags array
 * @dt_idle_states: Number of idle state entries
 * Returns 0 on success
 */
static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
					int dt_idle_states)
{
	u64 *psscr_val = NULL;
	u64 *psscr_mask = NULL;
	u32 *residency_ns = NULL;
	u64 max_residency_ns = 0;
	int rc = 0, i;

	psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val), GFP_KERNEL);
	psscr_mask = kcalloc(dt_idle_states, sizeof(*psscr_mask), GFP_KERNEL);
	residency_ns = kcalloc(dt_idle_states, sizeof(*residency_ns),
			       GFP_KERNEL);

	if (!psscr_val || !psscr_mask || !residency_ns) {
		rc = -1;
		goto out;
	}
	if (of_property_read_u64_array(np,
			"ibm,cpu-idle-state-psscr",
			psscr_val, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
		rc = -EINVAL;
		goto out;
	}

	if (of_property_read_u64_array(np,
			"ibm,cpu-idle-state-psscr-mask",
			psscr_mask, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
		rc = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array(np,
			"ibm,cpu-idle-state-residency-ns",
			residency_ns, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
		rc = -EINVAL;
		goto out;
	}
	/*
	 * Set pnv_first_deep_stop_state, pnv_deepest_stop_psscr_{val,mask},
	 * and the pnv_default_stop_{val,mask}.
	 *
	 * pnv_first_deep_stop_state should be set to the first stop
	 * level to cause hypervisor state loss.
	 *
	 * pnv_deepest_stop_{val,mask} should be set to values corresponding to
	 * the deepest stop state.
	 *
	 * pnv_default_stop_{val,mask} should be set to values corresponding to
	 * the shallowest (OPAL_PM_STOP_INST_FAST) loss-less stop state.
	 */
	pnv_first_deep_stop_state = MAX_STOP_STATE;
	for (i = 0; i < dt_idle_states; i++) {
		int err;
		u64 psscr_rl = psscr_val[i] & PSSCR_RL_MASK;

		if ((flags[i] & OPAL_PM_LOSE_FULL_CONTEXT) &&
		    (pnv_first_deep_stop_state > psscr_rl))
			pnv_first_deep_stop_state = psscr_rl;

		err = validate_psscr_val_mask(&psscr_val[i], &psscr_mask[i],
					      flags[i]);
		if (err) {
			report_invalid_psscr_val(psscr_val[i], err);
			continue;
		}

		if (max_residency_ns < residency_ns[i]) {
			max_residency_ns = residency_ns[i];
			pnv_deepest_stop_psscr_val = psscr_val[i];
			pnv_deepest_stop_psscr_mask = psscr_mask[i];
			deepest_stop_found = true;
		}

		if (!default_stop_found &&
		    (flags[i] & OPAL_PM_STOP_INST_FAST)) {
			pnv_default_stop_val = psscr_val[i];
			pnv_default_stop_mask = psscr_mask[i];
			default_stop_found = true;
		}
	}

	if (unlikely(!default_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
	} else {
		ppc_md.power_save = power9_idle;
		pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_default_stop_val, pnv_default_stop_mask);
	}

	if (unlikely(!deepest_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait");
	} else {
		pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_deepest_stop_psscr_val,
			pnv_deepest_stop_psscr_mask);
	}

	pr_info("cpuidle-powernv: Requested Level (RL) value of first deep stop = 0x%llx\n",
		pnv_first_deep_stop_state);

out:
	kfree(psscr_val);
	kfree(psscr_mask);
	kfree(residency_ns);
	return rc;
}
/*
 * Probe device tree for supported idle states
 */
static void __init pnv_probe_idle_states(void)
{
	struct device_node *np;
	int dt_idle_states;
	u32 *flags = NULL;
	int i;

	np = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!np) {
		pr_warn("opal: PowerMgmt Node not found\n");
		goto out;
	}
	dt_idle_states = of_property_count_u32_elems(np,
			"ibm,cpu-idle-state-flags");
	if (dt_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		goto out;
	}

	flags = kcalloc(dt_idle_states, sizeof(*flags), GFP_KERNEL);

	if (of_property_read_u32_array(np,
			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		goto out;
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (pnv_power9_idle_init(np, flags, dt_idle_states))
			goto out;
	}

	for (i = 0; i < dt_idle_states; i++)
		supported_cpuidle_states |= flags[i];

out:
	kfree(flags);
}
static int __init pnv_init_idle_states(void)
{
	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;

	pnv_probe_idle_states();

	if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_entry,
			PPC_INST_NOP);
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_exit,
			PPC_INST_NOP);
	} else {
		/*
		 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
		 * workaround is needed to use fastsleep. Provide sysfs
		 * control to choose how this workaround has to be applied.
		 */
		device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
	}
	pnv_alloc_idle_core_states();

	/*
	 * For each CPU, record its PACA address in each of its
	 * sibling thread's PACA at the slot corresponding to this
	 * CPU's index in the core.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		int cpu;

		pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
		for_each_possible_cpu(cpu) {
			int base_cpu = cpu_first_thread_sibling(cpu);
			int idx = cpu_thread_in_core(cpu);
			int i;

			for (i = 0; i < threads_per_core; i++) {
				int j = base_cpu + i;

				paca[j].thread_sibling_pacas[idx] = &paca[cpu];
			}
		}
	}

	if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
		ppc_md.power_save = power7_idle;

out:
	return 0;
}
machine_subsys_initcall(powernv, pnv_init_idle_states);