// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015 IBM Corp.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/asm-prototypes.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>
#include <asm/runlatch.h>
#include <asm/dbell.h>

#include "powernv.h"
#include "subcore.h"
/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE	0xF

#define P9_STOP_SPR_MSR 2000
#define P9_STOP_SPR_PSSCR      855
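
/*
 * These are the register identifiers passed to the stop-api below. PSSCR
 * really is architected SPR 855; MSR is not an SPR at all, so 2000 appears
 * to be a pseudo-SPR token that firmware's opal_slw_set_reg() understands.
 */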
static u32 supported_cpuidle_states;
struct pnv_idle_states_t *pnv_idle_states;
int nr_pnv_idle_states;
/*
 * The default stop state that will be used by ppc_md.power_save
 * function on platforms that support stop instruction.
 */
static u64 pnv_default_stop_val;
static u64 pnv_default_stop_mask;
static bool default_stop_found;
/*
 * First stop state levels when SPR and TB loss can occur.
 */
static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
static u64 pnv_first_spr_loss_level = MAX_STOP_STATE + 1;
/*
 * psscr value and mask of the deepest stop idle state.
 * Used when a cpu is offlined.
 */
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;

static unsigned long power7_offline_type;
static int pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these reg values of current cpu and use the
	 * same across all cpus.
	 */
	uint64_t lpcr_val	= mfspr(SPRN_LPCR);
	uint64_t hid0_val	= mfspr(SPRN_HID0);
	uint64_t hid1_val	= mfspr(SPRN_HID1);
	uint64_t hid4_val	= mfspr(SPRN_HID4);
	uint64_t hid5_val	= mfspr(SPRN_HID5);
	uint64_t hmeer_val	= mfspr(SPRN_HMEER);
	uint64_t msr_val = MSR_IDLE;
	uint64_t psscr_val = pnv_deepest_stop_psscr_val;

	for_each_present_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
			if (rc)
				return rc;

			rc = opal_slw_set_reg(pir,
					      P9_STOP_SPR_PSSCR, psscr_val);
			if (rc)
				return rc;
		}

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {
			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			/* Only p8 needs to set extra HID registers */
			if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
				rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
				if (rc != 0)
					return rc;
			}
		}
	}

	return 0;
}
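
/*
 * The net effect of the loop above: firmware records one (PIR, SPR, value)
 * tuple per call and replays them in its power-on sequence, so a wakeup
 * from a full-context-loss state resumes with HSPRG0 pointing at the
 * thread's paca and LPCR/MSR/PSSCR/HIDs already programmed. Any failure is
 * returned to the caller, which responds by disabling deep states (see
 * pnv_disable_deep_states() below).
 */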
u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
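
/*
 * The bitmask returned above is consumed by the cpuidle-powernv driver to
 * decide which idle states to register with the cpuidle core.
 */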
static void pnv_fastsleep_workaround_apply(void *info)
{
	int rc;
	int *err = info;

	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
					OPAL_CONFIG_IDLE_APPLY);
	if (rc)
		*err = 1;
}
static bool power7_fastsleep_workaround_entry = true;
static bool power7_fastsleep_workaround_exit = true;

/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;
static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}
static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	cpumask_t primary_thread_mask;
	int err;
	u8 val;

	if (kstrtou8(buf, 0, &val) || val != 1)
		return -EINVAL;

	if (fastsleep_workaround_applyonce == 1)
		return count;

	/*
	 * fastsleep_workaround_applyonce = 1 implies
	 * fastsleep workaround needs to be left in 'applied' state on all
	 * the cores. Do this by-
	 * 1. Disable the 'undo' workaround in fastsleep exit path
	 * 2. Send IPIs to all the cores which have at least one online thread
	 * 3. Disable the 'apply' workaround in fastsleep entry path
	 *
	 * There is no need to send ipi to cores which have all threads
	 * offlined, as last thread of the core entering fastsleep or deeper
	 * state would have applied workaround.
	 */
	power7_fastsleep_workaround_exit = false;

	get_online_cpus();
	primary_thread_mask = cpu_online_cores_map();
	on_each_cpu_mask(&primary_thread_mask,
			 pnv_fastsleep_workaround_apply,
			 &err, 1);
	put_online_cpus();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
		goto fail;
	}

	power7_fastsleep_workaround_entry = false;

	fastsleep_workaround_applyonce = 1;

	return count;
fail:
	return -EIO;
}

static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
			show_fastsleep_workaround_applyonce,
			store_fastsleep_workaround_applyonce);
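
/*
 * Example usage from userspace (the path is an assumption based on the
 * attribute being created on cpu_subsys' root device, i.e.
 * /sys/devices/system/cpu):
 *
 *	echo 1 > /sys/devices/system/cpu/fastsleep_workaround_applyonce
 *
 * applies the workaround once on every core and stops toggling it on each
 * fastsleep entry/exit. The only accepted value is 1, and the transition
 * is one-way.
 */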
static inline void atomic_start_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	int thread_nr = cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	clear_bit(thread_nr, state);
}
static inline void atomic_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	int thread_nr = cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	set_bit(thread_nr, state);
}
static inline void atomic_lock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state)))
		barrier();
}
static inline void atomic_unlock_and_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	u64 s = READ_ONCE(*state);
	u64 new, tmp;

	BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT));
	BUG_ON(s & thread);

again:
	new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT;
	tmp = cmpxchg(state, s, new);
	if (unlikely(tmp != s)) {
		s = tmp;
		goto again;
	}
}
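
/*
 * The cmpxchg() loop above is needed because atomic_stop_thread_idle()
 * sets thread bits without taking the lock: a sibling may flip its bit
 * between the READ_ONCE() and the cmpxchg(), in which case "new" is
 * recomputed from the freshly observed value and the update is retried.
 */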
static inline void atomic_unlock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state));
	clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
}
/* P7 and P8 */
struct p7_sprs {
	/* per core */
	u64 tscr;
	u64 worc;

	/* per subcore */
	u64 sdr1;
	u64 rpr;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};
static unsigned long power7_idle_insn(unsigned long type)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	bool full_winkle;
	struct p7_sprs sprs = {}; /* avoid false use-uninitialised */
	bool sprs_saved = false;
	int rc;

	if (unlikely(type != PNV_THREAD_NAP)) {
		atomic_lock_thread_idle();

		BUG_ON(!(*state & thread));
		*state &= ~thread;

		if (power7_fastsleep_workaround_entry) {
			if ((*state & core_thread_mask) == 0) {
				rc = opal_config_cpu_idle_state(
						OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_APPLY);
				BUG_ON(rc);
			}
		}

		if (type == PNV_THREAD_WINKLE) {
			sprs.tscr	= mfspr(SPRN_TSCR);
			sprs.worc	= mfspr(SPRN_WORC);

			sprs.sdr1	= mfspr(SPRN_SDR1);
			sprs.rpr	= mfspr(SPRN_RPR);

			sprs.lpcr	= mfspr(SPRN_LPCR);
			if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
				sprs.hfscr	= mfspr(SPRN_HFSCR);
				sprs.fscr	= mfspr(SPRN_FSCR);
			}
			sprs.purr	= mfspr(SPRN_PURR);
			sprs.spurr	= mfspr(SPRN_SPURR);
			sprs.dscr	= mfspr(SPRN_DSCR);
			sprs.wort	= mfspr(SPRN_WORT);

			sprs_saved = true;

			/*
			 * Increment winkle counter and set all winkle bits if
			 * all threads are winkling. This allows wakeup side to
			 * distinguish between fast sleep and winkle state
			 * loss. Fast sleep still has to resync the timebase so
			 * this may not be a really big win.
			 */
			*state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
			if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
					>> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT
					== threads_per_core)
				*state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
			WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		}

		atomic_unlock_thread_idle();
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		sprs.amr	= mfspr(SPRN_AMR);
		sprs.iamr	= mfspr(SPRN_IAMR);
		sprs.amor	= mfspr(SPRN_AMOR);
		sprs.uamor	= mfspr(SPRN_UAMOR);
	}

	local_paca->thread_idle_state = type;
	srr1 = isa206_idle_insn_mayloss(type);		/* go idle */
	local_paca->thread_idle_state = PNV_THREAD_RUNNING;

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
			/*
			 * We don't need an isync after the mtsprs here because
			 * the upcoming mtmsrd is execution synchronizing.
			 */
			mtspr(SPRN_AMR,		sprs.amr);
			mtspr(SPRN_IAMR,	sprs.iamr);
			mtspr(SPRN_AMOR,	sprs.amor);
			mtspr(SPRN_UAMOR,	sprs.uamor);
		}
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
		if (unlikely(type != PNV_THREAD_NAP)) {
			atomic_lock_thread_idle();
			if (type == PNV_THREAD_WINKLE) {
				WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
				*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
				*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			}
			atomic_unlock_and_stop_thread_idle();
		}
		return srr1;
	}

	/* HV state loss */
	BUG_ON(type == PNV_THREAD_NAP);

	atomic_lock_thread_idle();

	full_winkle = false;
	if (type == PNV_THREAD_WINKLE) {
		WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
		if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
			*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			full_winkle = true;
			BUG_ON(!sprs_saved);
		}
	}

	WARN_ON(*state & thread);

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	if (full_winkle) {
		mtspr(SPRN_TSCR,	sprs.tscr);
		mtspr(SPRN_WORC,	sprs.worc);
	}

	if (power7_fastsleep_workaround_exit) {
		rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_UNDO);
		BUG_ON(rc);
	}

	/* TB */
	if (opal_resync_timebase() != OPAL_SUCCESS)
		BUG();

core_woken:
	if (!full_winkle)
		goto subcore_woken;

	if ((*state & local_paca->subcore_sibling_mask) != 0)
		goto subcore_woken;

	/* Per-subcore SPRs */
	mtspr(SPRN_SDR1,	sprs.sdr1);
	mtspr(SPRN_RPR,		sprs.rpr);

subcore_woken:
	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();
	atomic_unlock_and_stop_thread_idle();

	/* Fast sleep does not lose SPRs */
	if (!full_winkle)
		return srr1;

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR,	sprs.lpcr);
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		mtspr(SPRN_HFSCR,	sprs.hfscr);
		mtspr(SPRN_FSCR,	sprs.fscr);
	}
	mtspr(SPRN_PURR,	sprs.purr);
	mtspr(SPRN_SPURR,	sprs.spurr);
	mtspr(SPRN_DSCR,	sprs.dscr);
	mtspr(SPRN_WORT,	sprs.wort);

	mtspr(SPRN_SPRG3,	local_paca->sprg_vdso);

	/*
	 * The SLB has to be restored here, but it sometimes still
	 * contains entries, so the __ variant must be used to prevent
	 * multi hits.
	 */
	__slb_restore_bolted_realmode();

	return srr1;
}
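
/*
 * SRR1 wake-state decoding used above: SRR1_WS_NOLOSS means no state was
 * lost, SRR1_WS_GPRLOSS means GPRs and some SPRs were lost but hypervisor
 * resources survived, and SRR1_WS_HVLOSS means hypervisor state was lost,
 * requiring the full winkle restore path.
 */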
extern unsigned long idle_kvm_start_guest(unsigned long srr1);
#ifdef CONFIG_HOTPLUG_CPU
static unsigned long power7_offline(void)
{
	unsigned long srr1;

	mtmsr(MSR_IDLE);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle. */
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
#endif

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(power7_offline_type);
	__ppc64_runlatch_on();

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
#endif

	mtmsr(MSR_KERNEL);

	return srr1;
}
#endif
void power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	irq_set_pending_from_srr1(srr1);
}

void power7_idle(void)
{
	if (!powersave_nap)
		return;

	power7_idle_type(PNV_THREAD_NAP);
}
struct p9_sprs {
	/* per core */
	u64 ptcr;
	u64 rpr;
	u64 tscr;
	u64 ldbar;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 pid;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	u64 mmcra;
	u32 mmcr0;
	u32 mmcr1;
	u64 mmcr2;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};
static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
	unsigned long mmcr0 = 0;
	struct p9_sprs sprs = {}; /* avoid false use-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		BUG_ON(!mmu_on);

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr);		/* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
		local_paca->requested_psscr = psscr;
		/* order setting requested_psscr vs testing dont_stop */
		smp_mb();
		if (atomic_read(&local_paca->dont_stop)) {
			local_paca->requested_psscr = 0;
			return 0;
		}
	}
#endif

	if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
		/*
		 * POWER9 DD2 can incorrectly set PMAO when waking up
		 * after a state-loss idle. Saving and restoring MMCR0
		 * over idle is a workaround.
		 */
		mmcr0		= mfspr(SPRN_MMCR0);
	}
	if ((psscr & PSSCR_RL_MASK) >= pnv_first_spr_loss_level) {
		sprs.lpcr	= mfspr(SPRN_LPCR);
		sprs.hfscr	= mfspr(SPRN_HFSCR);
		sprs.fscr	= mfspr(SPRN_FSCR);
		sprs.pid	= mfspr(SPRN_PID);
		sprs.purr	= mfspr(SPRN_PURR);
		sprs.spurr	= mfspr(SPRN_SPURR);
		sprs.dscr	= mfspr(SPRN_DSCR);
		sprs.wort	= mfspr(SPRN_WORT);

		sprs.mmcra	= mfspr(SPRN_MMCRA);
		sprs.mmcr0	= mfspr(SPRN_MMCR0);
		sprs.mmcr1	= mfspr(SPRN_MMCR1);
		sprs.mmcr2	= mfspr(SPRN_MMCR2);

		sprs.ptcr	= mfspr(SPRN_PTCR);
		sprs.rpr	= mfspr(SPRN_RPR);
		sprs.tscr	= mfspr(SPRN_TSCR);
		sprs.ldbar	= mfspr(SPRN_LDBAR);

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	sprs.amr	= mfspr(SPRN_AMR);
	sprs.iamr	= mfspr(SPRN_IAMR);
	sprs.amor	= mfspr(SPRN_AMOR);
	sprs.uamor	= mfspr(SPRN_UAMOR);

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->requested_psscr = 0;
#endif

	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
		unsigned long mmcra;

		/*
		 * We don't need an isync after the mtsprs here because the
		 * upcoming mtmsrd is execution synchronizing.
		 */
		mtspr(SPRN_AMR,		sprs.amr);
		mtspr(SPRN_IAMR,	sprs.iamr);
		mtspr(SPRN_AMOR,	sprs.amor);
		mtspr(SPRN_UAMOR,	sprs.uamor);

		/*
		 * Workaround for POWER9 DD2.0, if we lost resources, the ERAT
		 * might have been corrupted and needs flushing. We also need
		 * to reload MMCR0 (see mmcr0 comment above).
		 */
		if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
			asm volatile(PPC_INVALIDATE_ERAT);
			mtspr(SPRN_MMCR0, mmcr0);
		}

		/*
		 * DD2.2 and earlier need to set then clear bit 60 in MMCRA
		 * to ensure the PMU starts running.
		 */
		mmcra = mfspr(SPRN_MMCRA);
		mmcra |= PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
		mmcra &= ~PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER9, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < pnv_first_spr_loss_level)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	mtspr(SPRN_PTCR,	sprs.ptcr);
	mtspr(SPRN_RPR,		sprs.rpr);
	mtspr(SPRN_TSCR,	sprs.tscr);
	mtspr(SPRN_LDBAR,	sprs.ldbar);

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR,	sprs.lpcr);
	mtspr(SPRN_HFSCR,	sprs.hfscr);
	mtspr(SPRN_FSCR,	sprs.fscr);
	mtspr(SPRN_PID,		sprs.pid);
	mtspr(SPRN_PURR,	sprs.purr);
	mtspr(SPRN_SPURR,	sprs.spurr);
	mtspr(SPRN_DSCR,	sprs.dscr);
	mtspr(SPRN_WORT,	sprs.wort);

	mtspr(SPRN_MMCRA,	sprs.mmcra);
	mtspr(SPRN_MMCR0,	sprs.mmcr0);
	mtspr(SPRN_MMCR1,	sprs.mmcr1);
	mtspr(SPRN_MMCR2,	sprs.mmcr2);

	mtspr(SPRN_SPRG3,	local_paca->sprg_vdso);

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	if (mmu_on)
		mtmsr(MSR_KERNEL);

	return srr1;
}
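
/*
 * A note on the mmu_on parameter above: the KVM offline path has to stay
 * in real mode after wakeup (see power9_offline_stop() below) and passes
 * false, restoring MSR_KERNEL itself; all other callers pass true and get
 * the MMU turned back on before this function returns.
 */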
#ifdef CONFIG_HOTPLUG_CPU
static unsigned long power9_offline_stop(unsigned long psscr)
{
	unsigned long srr1;

#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	__ppc64_runlatch_off();
	srr1 = power9_idle_stop(psscr, true);
	__ppc64_runlatch_on();
#else
	/*
	 * Tell KVM we're entering idle.
	 * This does not have to be done in real mode because the P9 MMU
	 * is independent per-thread. Some steppings share radix/hash mode
	 * between threads, but in that case KVM has a barrier sync in real
	 * mode before and after switching between radix and hash.
	 *
	 * kvm_start_guest must still be called in real mode though, hence
	 * the false argument.
	 */
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;

	__ppc64_runlatch_off();
	srr1 = power9_idle_stop(psscr, false);
	__ppc64_runlatch_on();

	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
	mtmsr(MSR_KERNEL);
#endif

	return srr1;
}
#endif
void power9_idle_type(unsigned long stop_psscr_val,
				      unsigned long stop_psscr_mask)
{
	unsigned long psscr;
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	psscr = mfspr(SPRN_PSSCR);
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	__ppc64_runlatch_off();
	srr1 = power9_idle_stop(psscr, true);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	irq_set_pending_from_srr1(srr1);
}

/*
 * Used for ppc_md.power_save which needs a function with no parameters
 */
void power9_idle(void)
{
	power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}
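
/*
 * power9_idle() (like power7_idle() above) is what ends up in
 * ppc_md.power_save - see pnv_power9_idle_init() and pnv_init_idle_states()
 * below - and is called from the arch idle loop.
 */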
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * This is used in working around bugs in thread reconfiguration
 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 * memory and the way that XER[SO] is checkpointed.
 * This function forces the core into SMT4 in order by asking
 * all other threads not to stop, and sending a message to any
 * that are in a stop state.
 * Must be called with preemption disabled.
 */
void pnv_power9_force_smt4_catch(void)
{
	int cpu, cpu0, thr;
	int awake_threads = 1;		/* this thread is awake */
	int poke_threads = 0;
	int need_awake = threads_per_core;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
	}
	/* order setting dont_stop vs testing requested_psscr */
	smp_mb();
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (!paca_ptrs[cpu0+thr]->requested_psscr)
			++awake_threads;
		else
			poke_threads |= (1 << thr);
	}

	/* If at least 3 threads are awake, the core is in SMT4 already */
	if (awake_threads < need_awake) {
		/* We have to wake some threads; we'll use msgsnd */
		for (thr = 0; thr < threads_per_core; ++thr) {
			if (poke_threads & (1 << thr)) {
				ppc_msgsnd_sync();
				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
					   paca_ptrs[cpu0+thr]->hw_cpu_id);
			}
		}
		/* now spin until at least 3 threads are awake */
		do {
			for (thr = 0; thr < threads_per_core; ++thr) {
				if ((poke_threads & (1 << thr)) &&
				    !paca_ptrs[cpu0+thr]->requested_psscr) {
					++awake_threads;
					poke_threads &= ~(1 << thr);
				}
			}
		} while (awake_threads < need_awake);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);

void pnv_power9_force_smt4_release(void)
{
	int cpu, cpu0, thr;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);

	/* clear all the dont_stop flags */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
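
/*
 * The catch/release pair above is used by KVM HV as a bracket: catch
 * before touching checkpointed TM state so the core is held in SMT4,
 * release afterwards so sibling threads may enter stop again.
 */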
#ifdef CONFIG_HOTPLUG_CPU

void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
	u64 pir = get_hard_smp_processor_id(cpu);

	mtspr(SPRN_LPCR, lpcr_val);

	/*
	 * Program the LPCR via stop-api only if the deepest stop state
	 * can lose hypervisor context.
	 */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
		opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}
/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * Called with interrupts hard disabled and no lazy irq pending.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = power9_offline_stop(psscr);
	} else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
		srr1 = power7_offline();
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	return srr1;
}
#endif
/*
 * Power ISA 3.0 idle initialization.
 *
 * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
 * Register (PSSCR) to control idle behavior.
 *
 * PSSCR layout:
 * ----------------------------------------------------------
 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
 * ----------------------------------------------------------
 * 0      4     41   42    43   44     48    54   56    60
 *
 * PSSCR key fields:
 *	Bits 0:3  - Power-Saving Level Status (PLS). This field indicates the
 *	lowest power-saving state the thread entered since stop instruction was
 *	last executed.
 *
 *	Bit 41 - Status Disable(SD)
 *	0 - Shows PLS entries
 *	1 - PLS entries are all 0
 *
 *	Bit 42 - Enable State Loss
 *	0 - No state is lost irrespective of other fields
 *	1 - Allows state loss
 *
 *	Bit 43 - Exit Criterion
 *	0 - Exit from power-save mode on any interrupt
 *	1 - Exit from power-save mode controlled by LPCR's PECE bits
 *
 *	Bits 44:47 - Power-Saving Level Limit
 *	This limits the power-saving level that can be entered into.
 *
 *	Bits 60:63 - Requested Level
 *	Used to specify which power-saving level must be entered on executing
 *	stop instruction
 */
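
/*
 * Worked example (values are illustrative, not taken from firmware): in
 * IBM bit numbering, ESL is bit 42 and EC is bit 43, i.e. 1UL << (63-42)
 * and 1UL << (63-43). A state-losing request for RL = 4 is therefore
 *
 *	psscr_val = (1UL << 21) | (1UL << 20) | 4;	(ESL | EC | RL)
 *
 * which is 0x300004. A shallow EC = ESL = 0 state sets only RL, and such
 * wakeups behave like an ordinary interrupt return (see the noloss path
 * in power9_idle_stop() above).
 */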
int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
	int err = 0;

	/*
	 * psscr_mask == 0xf indicates an older firmware.
	 * Set remaining fields of psscr to the default values.
	 * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
	 */
	if (*psscr_mask == 0xf) {
		*psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
		*psscr_mask = PSSCR_HV_DEFAULT_MASK;
		return err;
	}

	/*
	 * New firmware is expected to set the psscr_val bits correctly.
	 * Validate that the following invariants are correctly maintained by
	 * the new firmware:
	 * - ESL bit value matches the EC bit value.
	 * - ESL bit is set for all the deep stop states.
	 */
	if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
		err = ERR_EC_ESL_MISMATCH;
	} else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		GET_PSSCR_ESL(*psscr_val) == 0) {
		err = ERR_DEEP_STATE_ESL_MISMATCH;
	}

	return err;
}
/*
 * pnv_power9_idle_init: Initializes the default idle state, first
 *                       deep idle state and deepest idle state on
 *                       ISA 3.0 CPUs.
 */
static void __init pnv_power9_idle_init(void)
{
	u64 max_residency_ns = 0;
	int i;

	/*
	 * pnv_deepest_stop_{val,mask} should be set to values corresponding to
	 * the deepest stop state.
	 *
	 * pnv_default_stop_{val,mask} should be set to values corresponding to
	 * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
	 */
	pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
	pnv_first_spr_loss_level = MAX_STOP_STATE + 1;
	for (i = 0; i < nr_pnv_idle_states; i++) {
		int err;
		struct pnv_idle_states_t *state = &pnv_idle_states[i];
		u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;

		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (pnv_first_tb_loss_level > psscr_rl))
			pnv_first_tb_loss_level = psscr_rl;

		if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		     (pnv_first_spr_loss_level > psscr_rl))
			pnv_first_spr_loss_level = psscr_rl;

		/*
		 * The idle code does not deal with TB loss occurring
		 * in a shallower state than SPR loss, so force it to
		 * behave like SPRs are lost if TB is lost. POWER9 would
		 * never encounter this, but a POWER8 core would if it
		 * implemented the stop instruction. So this is for forward
		 * compatibility.
		 */
		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (pnv_first_spr_loss_level > psscr_rl))
			pnv_first_spr_loss_level = psscr_rl;

		err = validate_psscr_val_mask(&state->psscr_val,
					      &state->psscr_mask,
					      state->flags);
		if (err) {
			report_invalid_psscr_val(state->psscr_val, err);
			continue;
		}

		state->valid = true;

		if (max_residency_ns < state->residency_ns) {
			max_residency_ns = state->residency_ns;
			pnv_deepest_stop_psscr_val = state->psscr_val;
			pnv_deepest_stop_psscr_mask = state->psscr_mask;
			pnv_deepest_stop_flag = state->flags;
			deepest_stop_found = true;
		}

		if (!default_stop_found &&
		    (state->flags & OPAL_PM_STOP_INST_FAST)) {
			pnv_default_stop_val = state->psscr_val;
			pnv_default_stop_mask = state->psscr_mask;
			default_stop_found = true;
			WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
		}
	}

	if (unlikely(!default_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
	} else {
		ppc_md.power_save = power9_idle;
		pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_default_stop_val, pnv_default_stop_mask);
	}

	if (unlikely(!deepest_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait");
	} else {
		pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_deepest_stop_psscr_val,
			pnv_deepest_stop_psscr_mask);
	}

	pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
		pnv_first_spr_loss_level);

	pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
		pnv_first_tb_loss_level);
}
static void __init pnv_disable_deep_states(void)
{
	/*
	 * The stop-api is unable to restore hypervisor
	 * resources on wakeup from platform idle states which
	 * lose full context. So disable such states.
	 */
	supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
	pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
	pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");

	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
	    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
		/*
		 * Use the default stop state for CPU-Hotplug
		 * if available.
		 */
		if (default_stop_found) {
			pnv_deepest_stop_psscr_val = pnv_default_stop_val;
			pnv_deepest_stop_psscr_mask = pnv_default_stop_mask;
			pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
				pnv_deepest_stop_psscr_val);
		} else { /* Fallback to snooze loop for CPU-Hotplug */
			deepest_stop_found = false;
			pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
		}
	}
}
/*
 * Probe device tree for supported idle states
 */
static void __init pnv_probe_idle_states(void)
{
	int i;

	if (nr_pnv_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		return;
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pnv_power9_idle_init();

	for (i = 0; i < nr_pnv_idle_states; i++)
		supported_cpuidle_states |= pnv_idle_states[i].flags;
}
/*
 * This function parses device-tree and populates all the information
 * into pnv_idle_states structure. It also sets up nr_pnv_idle_states
 * which is the number of cpuidle states discovered through device-tree.
 */
static int pnv_parse_cpuidle_dt(void)
{
	struct device_node *np;
	int nr_idle_states, i;
	int rc = 0;
	u32 *temp_u32;
	u64 *temp_u64;
	const char **temp_string;

	np = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!np) {
		pr_warn("opal: PowerMgmt Node not found\n");
		return -ENODEV;
	}
	nr_idle_states = of_property_count_u32_elems(np,
						"ibm,cpu-idle-state-flags");

	pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states),
				  GFP_KERNEL);
	temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
	temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
	temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);

	if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
		pr_err("Could not allocate memory for dt parsing\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Read flags */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].flags = temp_u32[i];

	/* Read latencies */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].latency_ns = temp_u32[i];

	/* Read residencies */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].residency_ns = temp_u32[i];

	/* For power9 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* Read pm_crtl_val */
		if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
					       temp_u64, nr_idle_states)) {
			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
			rc = -EINVAL;
			goto out;
		}
		for (i = 0; i < nr_idle_states; i++)
			pnv_idle_states[i].psscr_val = temp_u64[i];

		/* Read pm_crtl_mask */
		if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
					       temp_u64, nr_idle_states)) {
			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
			rc = -EINVAL;
			goto out;
		}
		for (i = 0; i < nr_idle_states; i++)
			pnv_idle_states[i].psscr_mask = temp_u64[i];
	}

	/*
	 * power8 specific properties ibm,cpu-idle-state-pmicr-mask and
	 * ibm,cpu-idle-state-pmicr-val were never used and there is no
	 * plan to use it in near future. Hence, not parsing these properties
	 */

	if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
					  temp_string, nr_idle_states) < 0) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		strlcpy(pnv_idle_states[i].name, temp_string[i],
			PNV_IDLE_NAME_LEN);
	nr_pnv_idle_states = nr_idle_states;
	rc = 0;
out:
	kfree(temp_u32);
	kfree(temp_u64);
	kfree(temp_string);
	return rc;
}
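
/*
 * Illustrative device-tree fragment with the properties parsed above (the
 * values are made up, not taken from a real machine):
 *
 *	power-mgt {
 *		ibm,cpu-idle-state-names = "stop0", "stop1";
 *		ibm,cpu-idle-state-flags = <0x100 0x101>;
 *		ibm,cpu-idle-state-latencies-ns = <1000 5000>;
 *		ibm,cpu-idle-state-residency-ns = <10000 100000>;
 *		ibm,cpu-idle-state-psscr = /bits/ 64 <0x330 0x300330>;
 *		ibm,cpu-idle-state-psscr-mask = /bits/ 64 <0x3ff 0x3ff>;
 *	};
 */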
static int __init pnv_init_idle_states(void)
{
	int cpu;
	int rc = 0;

	/* Set up PACA fields */
	for_each_present_cpu(cpu) {
		struct paca_struct *p = paca_ptrs[cpu];

		p->idle_state = 0;
		if (cpu == cpu_first_thread_sibling(cpu))
			p->idle_state = (1 << threads_per_core) - 1;

		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
			/* P7/P8 nap */
			p->thread_idle_state = PNV_THREAD_RUNNING;
		}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		p->requested_psscr = 0;
		atomic_set(&p->dont_stop, 0);
#endif
	}

	/* In case we error out nr_pnv_idle_states will be zero */
	nr_pnv_idle_states = 0;
	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;
	rc = pnv_parse_cpuidle_dt();
	if (rc)
		return rc;
	pnv_probe_idle_states();

	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
			power7_fastsleep_workaround_entry = false;
			power7_fastsleep_workaround_exit = false;
		} else {
			/*
			 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
			 * workaround is needed to use fastsleep. Provide sysfs
			 * control to choose how this workaround has to be
			 * applied.
			 */
			device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
		}

		update_subcore_sibling_mask();

		if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) {
			ppc_md.power_save = power7_idle;
			power7_offline_type = PNV_THREAD_NAP;
		}

		if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) &&
		    (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT))
			power7_offline_type = PNV_THREAD_WINKLE;
		else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) ||
			 (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1))
			power7_offline_type = PNV_THREAD_SLEEP;
	}

	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		if (pnv_save_sprs_for_deep_states())
			pnv_disable_deep_states();
	}

out:
	return 0;
}
machine_subsys_initcall(powernv, pnv_init_idle_states);