arch/powerpc/kernel/idle_book3s.S
/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _PTCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	(PSSCR_EC | PSSCR_ESL) >> 16
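/*
 * The mask is pre-shifted right by 16 so that it can be used directly as
 * the 16-bit immediate of an andis. instruction, which ANDs with the
 * immediate shifted left by 16 (see the EC/ESL check in power_enter_stop).
 */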

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in the interrupt stack frame.
 */
save_sprs_to_stack:
	/*
	 * Note: all registers, whether per-core, per-subcore or per-thread,
	 * are saved here, since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	/*
	 * Note - SDR1 is dropped in Power ISA v3.0, hence SDR1 is not
	 * saved here.
	 */
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	3b
	HMT_MEDIUM
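	/*
	 * The plain lwz spin above runs at low thread priority and takes no
	 * reservation. Once the lock looks clear, re-load with lwarx so the
	 * caller's stwcx. pairs with a fresh reservation, and re-check the
	 * lock bit in case it was taken again in the meantime.
	 */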
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	core_idle_lock_held
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested STOP state in POWER9
 *
 * Whether to check IRQ_HAPPENED in r4:
 *	0 - don't check
 *	1 - check
 *
 * Address to 'rfid' to in r5.
 */
_GLOBAL(pnv_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/*
	 * NAP is a state loss; we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* Hard disable interrupts */
	mfmsr	r9
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */
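	/*
	 * The rldicl/rotldi pair above clears MSR_EE without needing a
	 * scratch mask: rotating the MSR right by 16 bits brings EE to the
	 * most significant bit, where rldicl's mask clears it, and rotating
	 * left by 16 restores the original layout with EE now zero.
	 */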

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
	beq	1f
	cmpwi	cr0,r4,0
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/*
	 * We mark irqs hard disabled as this is the state we'll be in
	 * when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6
	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r5
	mtspr	SPRN_SRR1, r7
	rfid

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in the system    */
	/* reset interrupt vector in exceptions-64s.S.        */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
	lwarx	r15,0,r14

	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held

	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS

	/*
	 * If cr0 = 0, then the current thread is the last thread of the
	 * core entering sleep. The last thread needs to execute the
	 * hardware bug workaround code if required by the platform.
	 * Make the workaround call unconditional here; the branch below
	 * is patched out during idle-state discovery if the platform
	 * does not require it.
	 */
	.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync
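	/*
	 * stwcx. fails if the reservation from the lwarx above was lost,
	 * in which case the read-modify-write is retried. The isync,
	 * together with the preceding conditional branch, acts as an
	 * acquire barrier before the idle entry sequence.
	 */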

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Clear Lock bit */
	li	r0,0
	lwsync
	stw	r0,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)

/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	/*
	 * Check if we are executing the lite variant with ESL=EC=0
	 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
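	/*
	 * With EC=ESL=0, the stop instruction behaves like an ordinary
	 * instruction: on wakeup, execution resumes at the instruction
	 * following it and no state is lost, so we simply return 0.
	 */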
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
	li	r3,0  /* Since we didn't lose state, return 0 */
	b	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
	/*
	 * Check if the requested state is a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
.Lhandle_deep_stop:
	/*
	 * Entering deep idle state.
	 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
	 * stack and enter stop
	 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)

_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr
	li	r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr	r4,r3
	li	r3,PNV_THREAD_NAP
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_sleep)
	li	r3,PNV_THREAD_SLEEP
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_winkle)
	li	r3,PNV_THREAD_WINKLE
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;

/*
 * r3 - The PSSCR value corresponding to the stop state.
 * r4 - The PSSCR mask corresponding to the stop state.
 */
_GLOBAL(power9_idle_stop)
	mfspr	r5,SPRN_PSSCR
	andc	r5,r5,r4
	or	r3,r3,r5
	mtspr	SPRN_PSSCR,r3
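	/* PSSCR = (old PSSCR & ~mask) | requested value */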
	LOAD_REG_ADDR(r5,power_enter_stop)
	li	r4,1
	b	pnv_powersave_common
	/* No return */
/*
 * Called from reset vector. Check whether we have woken up with
 * hypervisor state loss. If yes, restore hypervisor state and return
 * back to reset vector.
 *
 * r13 - Contents of HSPRG0
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
_GLOBAL(pnv_restore_hyp_resource)
BEGIN_FTR_SECTION
	ld	r2,PACATOC(r13);
	/*
	 * POWER ISA v3.0. Use the PSSCR to determine if we
	 * are waking up from a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	mfspr	r5,SPRN_PSSCR
	/*
	 * Bits 0-3 of the PSSCR hold the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from.
	 */
	rldicl	r5,r5,4,60		/* Shift PLS into the low 4 bits */
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss
	/*
	 * Waking up without hypervisor state loss. Return to
	 * reset vector
	 */
	blr

END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

/*
 * POWER ISA 2.07 or less.
 * Check if the last bit of HSPRG0 is set. This indicates whether we are
 * waking up from winkle.
 */
	clrldi	r5,r13,63
	clrrdi	r13,r13,1

	/* Now that we are sure r13 is corrected, load TOC */
	ld	r2,PACATOC(r13);
	cmpwi	cr4,r5,1
	mtspr	SPRN_HSPRG0,r13

	lbz	r0,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r0,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage cr3 shouldn't contain 'gt', since that
	 * would indicate we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

/*
 * Called if waking up from an idle state which can cause either partial or
 * complete hypervisor state loss.
 * In POWER8, called if waking up from fastsleep or winkle.
 * In POWER9, called if waking up from a stop state >= pnv_first_deep_stop_state.
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 */
_GLOBAL(pnv_wakeup_tb_loss)
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack,
	 * and if there was a state loss, or PACA_NAPSTATELOST was set, then
	 * the NVGPRs are restored. If we are here, it is likely that state
	 * is lost, but not guaranteed -- neither the ISA207 nor the ISA300
	 * test to reach here is the same as the test to restore NVGPRs:
	 * the PACA_THREAD_IDLE_STATE test for ISA207, the PSSCR test for
	 * ISA300, and the SRR1 test for restoring NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mflr	r17
	mfspr	r16,SPRN_SRR1
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	/*
	 * The lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wakeup path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase, or restoring
	 *    context.
	 * In either case, loop until the lock bit is cleared.
	 */
	bnel	core_idle_lock_held

	cmpwi	cr2,r15,0

	/*
	 * At this stage:
	 * cr2 - eq if first thread to wake up in the core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync
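	/*
	 * The lock is now held: the per-subcore/per-core state restore
	 * below is serialized against the other threads waking up in
	 * this core.
	 */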

BEGIN_FTR_SECTION
	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0		/* Check if first in subcore */

	or	r15,r15,r7	/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7	/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
	.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least
	 * partial hypervisor state loss, to determine if a timebase
	 * resync is needed.
	 */
	ble	cr3,clear_lock
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep, per-core state is not lost, skip to
	 * clear_lock.
	 */
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up, and it is waking up with
	 * complete hypervisor state loss. Restore per-core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw	r15,0(r14)
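	/*
	 * The lwsync orders the state restores above before the store that
	 * releases the lock bit.
	 */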

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)
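	/*
	 * The SLB shadow area is big-endian regardless of the kernel's
	 * endianness, hence the LDX_BE loads below. Only the bolted
	 * entries are restored here; everything else is faulted back in.
	 */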

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
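	/*
	 * Under ELF ABI v1, a function pointer refers to a function
	 * descriptor whose first doubleword is the actual entry address,
	 * hence the extra load above; either way r12 now holds the entry
	 * point.
	 */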
	mtctr	r12
	bctrl

hypervisor_state_restored:

	mtspr	SPRN_SRR1,r16
	mtlr	r17
	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_loss)
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_noloss)
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
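	/*
	 * If NAPSTATELOST was set (e.g. by pnv_wakeup_tb_loss), the NVGPRs
	 * were clobbered and the full-restore path above is taken;
	 * otherwise they are intact and only CR, MSR and NIP need
	 * restoring.
	 */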
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid