/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
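/*
 * Clarifying note: the GPRn symbols used below are the asm-offsets of the
 * (otherwise unused) volatile-GPR save slots in the pt_regs frame that
 * pnv_powersave_common builds on the stack, which is why they are free to
 * hold SPR values across winkle.
 */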
#define _MMCR0	GPR0
#define _SDR1	GPR3
#define _PTCR	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _LPCR	GPR12

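/*
 * A short sketch of how the mask defined below is used (assuming the usual
 * PSSCR bit definitions, where PSSCR_EC | PSSCR_ESL == 0x00300000): shifting
 * right by 16 yields 0x30, which power_enter_stop tests with
 *
 *	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
 *
 * since andis. ANDs against (immediate << 16), i.e. against 0x00300000.
 */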
#define PSSCR_EC_ESL_MASK_SHIFTED          (PSSCR_EC | PSSCR_ESL) >> 16

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note that all per-core, per-subcore and per-thread registers are
	 * saved here, since any thread in the core might wake up first
	 */
BEGIN_FTR_SECTION
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence SDR1 is not
	 * saved here
	 */
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	mfspr	r3,SPRN_LPCR
	std	r3,_LPCR(r1)
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9 - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested PSSCR value in POWER9
 *
 * Address of idle handler to branch to in realmode in r4
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss; we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mtctr	r4

	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r5
	std	r5,_CCR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	mtmsrd	r7,0
	bctr

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5			/* Add if winkle */
	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then the current thread is the last thread of the core entering
 * sleep. The last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The branch below is
 * patched out when the idle states are discovered if the platform does not
 * require the workaround.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)

/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
 * Check if we are executing the lite variant with ESL=EC=0
 */
	andis.   r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi   r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
	bne	 .Lhandle_esl_ec_set
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
	li	r3,0  /* Since we didn't lose state, return 0 */

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
	 * it can determine if the wakeup reason is an HMI in
	 * CHECK_HMI_INTERRUPT.
	 *
	 * However, when we wake up with ESL=0, SRR1 will not contain the
	 * wakeup reason, so there is no point setting r12 to SRR1.
	 *
	 * Further, we clear r12 here, so that we don't accidentally enter the
	 * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI.
	 */
	li	r12, 0
	b 	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
	/*
	 * POWER9 DD2 can incorrectly set PMAO when waking up after a
	 * state-loss idle. Saving and restoring MMCR0 over idle is a
	 * workaround.
	 */
	mfspr	r4,SPRN_MMCR0
	std	r4,_MMCR0(r1)

/*
 * Check if the requested state is a deep idle state.
 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
.Lhandle_deep_stop:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz     r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx   r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc    r15,r15,r7			/* Clear thread bit */

	stwcx.  r15,0,r14
	bne-    lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
_GLOBAL(power7_idle_insn)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common

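/*
 * A usage note derived from the macro body below: CHECK_HMI_INTERRUPT
 * expects the wakeup SRR1 value in r12, reloads r2 (TOC) and r1 (stack)
 * from the PACA before calling hmi_exception_realmode, preserves r3 in the
 * frame's ORIG_GPR3 slot around that call, and falls through to local
 * label 20 when the wakeup is not a hypervisor maintenance interrupt.
 */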
#define CHECK_HMI_INTERRUPT						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xf;  /* extract wake reason field (P8) */ \
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xe;  /* P7 wake reason field is 3 bits */ \
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne+	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired PSSCR register value.
 */
_GLOBAL(power9_idle_stop)
	std	r3, PACA_REQ_PSSCR(r13)
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */

/*
 * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
 * HSPRG0 will be set to the HSPRG0 value of one of the
 * threads in this core. Thus the value we have in r13
 * may not be this thread's paca pointer.
 *
 * Fortunately, the TIR remains invariant. Since this thread's
 * paca pointer is recorded in all of its siblings' pacas, we can
 * correctly recover this thread's paca pointer if we
 * know the index of this thread in the core.
 *
 * This index can be obtained from the TIR.
 *
 * i.e., the thread's position in the core = TIR.
 * If this value is i, then this thread's paca is
 * paca->thread_sibling_pacas[i].
 */
power9_dd1_recover_paca:
	mfspr	r4, SPRN_TIR
	/*
	 * Since each entry in thread_sibling_pacas is 8 bytes
	 * we need to left-shift by 3 bits. Thus r4 = i * 8
	 */
	sldi	r4, r4, 3
	/* Get &paca->thread_sibling_pacas[0] in r5 */
	ld	r5, PACA_SIBLING_PACA_PTRS(r13)
	/* Load paca->thread_sibling_pacas[i] into r13 */
	ldx	r13, r4, r5
	SET_PACA(r13)
	/*
	 * Indicate that we have lost NVGPR state
	 * which needs to be restored from the stack.
	 */
	li	r3, 1
	stb	r3,PACA_NAPSTATELOST(r13)
	blr

/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31
	cmpwi	cr3,r11,2

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into r12, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r12,r3,SRR1_WAKEMCE_RESVD@h

	b	pnv_powersave_wakeup

/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 * r12 - SRR1
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
	ld	r2, PACATOC(r13)

BEGIN_FTR_SECTION
BEGIN_FTR_SECTION_NESTED(70)
	bl	power9_dd1_recover_paca
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_POWER9_DD1, 70)
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

	mr	r3,r12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss

/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return back to link.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * Workaround for POWER9: if we lost resources, the ERAT
	 * might have been mixed up and needs flushing. We also need
	 * to reload MMCR0 (see comment above).
	 */
	blt	cr3,1f
	PPC_INVALIDATE_ERAT
	ld	r1,PACAR1(r13)
	ld	r4,_MMCR0(r1)
	mtspr	SPRN_MMCR0,r4
1:
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

BEGIN_FTR_SECTION_NESTED(71)
	/*
	 * Assume that we are waking up from the state
	 * corresponding to the Requested Level (RL) in the PSSCR,
	 * which is in bits 60-63.
	 */
	ld	r5,PACA_REQ_PSSCR(r13)
	rldicl  r5,r5,0,60
FTR_SECTION_ELSE_NESTED(71)
	/*
	 * Bits 0-3 correspond to the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from.
	 */
	mfspr	r5, SPRN_PSSCR
	rldicl  r5,r5,4,60
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss /* returns to caller */

	blr	/* Waking up without hypervisor state loss. */

/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or less.
	 * Check if we slept with sleep or winkle.
	 */
	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt' since that
	 * indicates we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Waking up without hypervisor state loss */

/*
 * Called if waking up from an idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is
	 * lost, but not guaranteed -- the tests used to reach here (the
	 * PACA_THREAD_IDLE_STATE test for ISA207 and the PSSCR test for
	 * ISA300) are not the same as the SRR1 test used to decide whether
	 * to restore NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mr	r19,r12
	mr	r18,r4
	mflr	r17
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * Lock bit is set in one of the 2 cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case, loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 * }
	 *
	 * sleep_wake()
	 * {
	 *	bool first_in_core, first_in_subcore;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *	bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 *
	 *	if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *		core_idle_state |= THREAD_WINKLE_BITS;
	 *	core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *	winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *	core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 */
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/* Shift thread bit to winkle mask, then test if this thread is set,
	 * and remove it from the winkle bits */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7		/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7		/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch instruction below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3 which indicates that we are waking up with at least partial
	 * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,.Ltb_resynced
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep (POWER8), per core state
	 * is not lost, skip to clear_lock.
	 */
.Ltb_resynced:
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up, and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

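	/*
	 * On radix MMU the SLB is not used for segment translation, so the
	 * MMU feature section below skips the SLB reload and branches
	 * straight to no_segments.
	 */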
BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
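	/*
	 * Under ELF ABI v1, cpu_restore points to a function descriptor;
	 * the first doubleword of the descriptor is the actual entry
	 * address, so dereference it once more before the indirect call.
	 */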
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

BEGIN_FTR_SECTION
	ld	r4,_LPCR(r1)
	mtspr	SPRN_LPCR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:

	mr	r12,r19
	mtlr	r17
	blr		/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r4,PACAKMSR(r13)
	ld	r5,_LINK(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r4,PACAKMSR(r13)
	ld	r5,_NIP(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr