/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _PTCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED (PSSCR_EC | PSSCR_ESL) >> 16
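/*
 * The EC/ESL mask is kept pre-shifted right by 16 so it can be used
 * directly as the immediate of andis., which shifts its 16-bit
 * immediate left by 16 before ANDing (see power_enter_stop below).
 */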

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note that all registers, whether per-core, per-subcore or
	 * per-thread, are saved here, since any thread in the core
	 * might wake up first.
	 */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	/*
	 * Note - SDR1 is dropped in Power ISA v3, so it is not saved
	 * (or later restored) here.
	 */
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	3b
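	/*
	 * Lock appears free: go back to medium priority and re-load
	 * under a lwarx reservation, so the caller returns with a
	 * current value in r15 and a reservation its stwcx. can pair
	 * with.
	 */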
	HMT_MEDIUM
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	core_idle_lock_held
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested STOP state in POWER9
 *
 * To check IRQ_HAPPENED in r4
 *	0 - don't check
 *	1 - check
 *
 * Address to 'rfid' to in r5
 */
_GLOBAL(pnv_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* Hard disable interrupts */
	mfmsr	r9
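	/*
	 * Clear MSR_EE (bit 48): the rotate-left-48 moves EE into the
	 * most-significant bit, rldicl's mask (bits 1-63) clears it,
	 * and the rotate-left-16 brings the MSR back into position.
	 */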
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
	beq	1f
	cmpwi	cr0,r4,0
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/* We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6
	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r5
	mtspr	SPRN_SRR1, r7
	rfid

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in the system    */
	/* reset interrupt vector in exceptions-64s.S.        */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
	lwarx	r15,0,r14

	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held

	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then current thread is the last thread of the core entering
 * sleep. Last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The below branch call is
 * patched out when the idle states are discovered if the platform does not
 * require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_rm_config_cpu_idle_state

	/* Clear Lock bit */
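	/*
	 * lwsync makes this a release: the workaround call's effects
	 * are ordered before the store that clears
	 * PNV_CORE_IDLE_LOCK_BIT, so threads spinning in
	 * core_idle_lock_held observe a consistent state.
	 */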
	li	r0,0
	lwsync
	stw	r0,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)

/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
 * Check if we are executing the lite variant with ESL=EC=0
 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
	bne	1f
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
	li	r3,0  /* Since we didn't lose state, return 0 */
	b	pnv_wakeup_noloss
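	/*
	 * With EC=ESL=0 the stop is the "lite" variant: the thread
	 * wakes at the instruction after PPC_STOP with no state loss,
	 * which is why the noloss exit path is taken above.
	 */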
/*
 * Check if the requested state is a deep idle state.
 */
1:	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
2:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)

_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr
	li	r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr	r4,r3
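	/* pnv_powersave_common takes the IRQ-check flag in r4 */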
	li	r3,PNV_THREAD_NAP
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_sleep)
	li	r3,PNV_THREAD_SLEEP
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_winkle)
	li	r3,PNV_THREAD_WINKLE
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;

/*
 * r3 - The PSSCR value corresponding to the stop state.
 * r4 - The PSSCR mask corresponding to the stop state.
 */
_GLOBAL(power9_idle_stop)
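	/*
	 * Compose the new PSSCR: keep the fields not selected by the
	 * mask and OR in the requested value, i.e.
	 * PSSCR = (PSSCR & ~mask) | value, with value in r3, mask in r4.
	 */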
	mfspr	r5,SPRN_PSSCR
	andc	r5,r5,r4
	or	r3,r3,r5
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r5,power_enter_stop)
	li	r4,1
	b	pnv_powersave_common
	/* No return */
/*
 * Called from reset vector. Check whether we have woken up with
 * hypervisor state loss. If yes, restore hypervisor state and return
 * back to reset vector.
 *
 * r13 - Contents of HSPRG0
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
_GLOBAL(pnv_restore_hyp_resource)
BEGIN_FTR_SECTION
	ld	r2,PACATOC(r13);
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from deep idle state
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	mfspr	r5,SPRN_PSSCR
	/*
	 * Bits 0-3 of PSSCR hold the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from
	 */
	rldicl	r5,r5,4,60
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss
	/*
	 * Waking up without hypervisor state loss. Return to
	 * reset vector
	 */
	blr

END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/*
	 * POWER ISA 2.07 or less.
	 * Check if the last bit of HSPRG0 is set. This indicates whether
	 * we are waking up from winkle.
	 */
	clrldi	r5,r13,63
	clrrdi	r13,r13,1

	/* Now that we are sure r13 is corrected, load TOC */
	ld	r2,PACATOC(r13);
	cmpwi	cr4,r5,1
	mtspr	SPRN_HSPRG0,r13

	lbz	r0,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r0,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt', since that
	 * indicates we are waking up with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

/*
 * Called if waking up from idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 */
_GLOBAL(pnv_wakeup_tb_loss)
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack
	 * and they are restored before switching to the process context. Hence
	 * until they are restored, they are free to be used.
	 *
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mflr	r17
	mfspr	r16,SPRN_SRR1
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	/*
	 * Lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    fastsleep workaround code.
	 * b. In the wake up path, another thread is executing fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
	bnel	core_idle_lock_held

	cmpwi	cr2,r15,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wakeup in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync

BEGIN_FTR_SECTION
	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7		/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7		/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The below branch instruction will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least
	 * partial hypervisor state loss, to determine if a TIMEBASE
	 * RESYNC is needed.
	 */
	ble	cr3,clear_lock
	/* Time base re-sync */
	bl	opal_rm_resync_timebase;
	/*
	 * If waking up from sleep, per core state is not lost, skip to
	 * clear_lock.
	 */
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up, and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
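	/*
	 * The SLB shadow buffer holds SLB_NUM_BOLTED (esid, vsid)
	 * pairs, 16 bytes apart, stored big-endian (hence LDX_BE).
	 * Only entries with SLB_ESID_V set are put back with slbmte.
	 */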
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
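	/*
	 * Under ELF ABI v1, CPU_SPEC_RESTORE holds a function
	 * descriptor; the extra load above fetches the real entry
	 * point from its first doubleword.
	 */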
	mtctr	r12
	bctrl

hypervisor_state_restored:

	mtspr	SPRN_SRR1,r16
	mtlr	r17
	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_rm_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_loss)
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_noloss)
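	/*
	 * PACA_NAPSTATELOST was cleared before entering idle ("We
	 * haven't lost state ... yet"); if it got set while idle,
	 * state really was lost, so take the full-restore path.
	 */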
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid