/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _MMCR0	GPR0
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _PTCR	GPR12

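/*
 * (PSSCR_EC | PSSCR_ESL) shifted right by 16 so that the pair can be
 * tested with a single andis. against a requested PSSCR value (see
 * power_enter_stop below).
 */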
#define PSSCR_EC_ESL_MASK_SHIFTED (PSSCR_EC | PSSCR_ESL) >> 16

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note that all registers (per-core, per-subcore and per-thread)
	 * are saved here, since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	mfspr r3,SPRN_PTCR
	std r3,_PTCR(r1)
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
	 * SDR1 here
	 */
FTR_SECTION_ELSE
	mfspr r3,SPRN_SDR1
	std r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr r3,SPRN_RPR
	std r3,_RPR(r1)
	mfspr r3,SPRN_SPURR
	std r3,_SPURR(r1)
	mfspr r3,SPRN_PURR
	std r3,_PURR(r1)
	mfspr r3,SPRN_TSCR
	std r3,_TSCR(r1)
	mfspr r3,SPRN_DSCR
	std r3,_DSCR(r1)
	mfspr r3,SPRN_AMOR
	std r3,_AMOR(r1)
	mfspr r3,SPRN_WORT
	std r3,_WORT(r1)
	mfspr r3,SPRN_WORC
	std r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9 - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz r15,0(r14)
	andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne 3b
	HMT_MEDIUM
	lwarx r15,0,r14
	andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bne core_idle_lock_held
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested STOP state in POWER9
 *
 * Whether to check IRQ_HAPPENED is passed in r4:
 *	0 - don't check
 *	1 - check
 *
 * The address to 'rfid' to is passed in r5.
 */
_GLOBAL(pnv_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr r0
	std r0,16(r1)
	stdu r1,-INT_FRAME_SIZE(r1)
	std r0,_LINK(r1)
	std r0,_NIP(r1)

	/* Hard disable interrupts */
	mfmsr r9
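	/*
	 * Rotate the MSR so that the EE bit becomes the MSB, clear it with
	 * the rldicl mask, then rotate back: this clears MSR_EE while
	 * leaving all other MSR bits intact.
	 */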
	rldicl r9,r9,48,1
	rotldi r9,r9,16
	mtmsrd r9,1			/* hard-disable interrupts */

	/* Check if something happened while soft-disabled */
	lbz r0,PACAIRQHAPPENED(r13)
	andi. r0,r0,~PACA_IRQ_HARD_DIS@l
	beq 1f
	cmpwi cr0,r4,0
	beq 1f
	addi r1,r1,INT_FRAME_SIZE
	ld r0,16(r1)
	li r3,0				/* Return 0 (no nap) */
	mtlr r0
	blr

1:	/* We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li r0,PACA_IRQ_HARD_DIS
	stb r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li r0,0
	stb r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr r4
	std r4,_CCR(r1)
	std r9,_MSR(r1)
	std r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	li r6, MSR_RI
	andc r6, r9, r6
	mtmsrd r6, 1			/* clear RI before setting SRR0/1 */
	mtspr SPRN_SRR0, r5
	mtspr SPRN_SRR1, r7
	rfid

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in the system    */
	/* reset interrupt vector in exceptions-64s.S.        */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb r3,PACA_THREAD_IDLE_STATE(r13)
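	/* cr3: lt = nap, eq = sleep, gt = winkle; tested below and again at common_enter */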
	cmpwi cr3,r3,PNV_THREAD_SLEEP
	bge cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz r7,PACA_THREAD_MASK(r13)
	ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
	lwarx r15,0,r14

	andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel core_idle_lock_held

	andc r15,r15,r7			/* Clear thread bit */

	andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS

	/*
	 * If cr0 = 0, then the current thread is the last thread of the
	 * core entering sleep. The last thread needs to execute the
	 * hardware bug workaround code if required by the platform, so
	 * make the workaround call unconditionally here. The branch below
	 * is patched out during idle-state discovery if the platform does
	 * not require the workaround.
	 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq fastsleep_workaround_at_entry

	stwcx. r15,0,r14
	bne- lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx. r15,0,r14
	bne- lwarx_loop1
	isync

	/* Fast sleep workaround */
	li r3,1
	li r4,1
	bl opal_config_cpu_idle_state

	/* Clear Lock bit */
	li r0,0
	lwsync
	stw r0,0(r14)
	b common_enter

enter_winkle:
	bl save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)

/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	/*
	 * Check if we are executing the lite variant with ESL=EC=0
	 */
	andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi r3,r3,60			/* r3 = Bits[60:63] = Requested Level (RL) */
	bne .Lhandle_esl_ec_set
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
	li r3,0				/* Since we didn't lose state, return 0 */
	b pnv_wakeup_noloss

.Lhandle_esl_ec_set:
	/*
	 * POWER9 DD2 can incorrectly set PMAO when waking up after a
	 * state-loss idle. Saving and restoring MMCR0 over idle is a
	 * workaround.
	 */
	mfspr r4,SPRN_MMCR0
	std r4,_MMCR0(r1)

	/*
	 * Check if the requested state is a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd r3,r4
	bge .Lhandle_deep_stop
	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
.Lhandle_deep_stop:
	/*
	 * Entering deep idle state.
	 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
	 * stack and enter stop
	 */
	lbz r7,PACA_THREAD_MASK(r13)
	ld r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx r15,0,r14
	andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel core_idle_lock_held
	andc r15,r15,r7			/* Clear thread bit */

	stwcx. r15,0,r14
	bne- lwarx_loop_stop
	isync

	bl save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)

_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz r4,ADDROFF(powersave_nap)(r3)
	cmpwi 0,r4,0
	beqlr
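	/* r3 = 1 becomes r4 in power7_nap below: check IRQ_HAPPENED before napping */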
	li r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr r4,r3
	li r3,PNV_THREAD_NAP
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b pnv_powersave_common
	/* No return */

_GLOBAL(power7_sleep)
	li r3,PNV_THREAD_SLEEP
	li r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b pnv_powersave_common
	/* No return */

_GLOBAL(power7_winkle)
	li r3,PNV_THREAD_WINKLE
	li r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b pnv_powersave_common
	/* No return */

#define CHECK_HMI_INTERRUPT						\
	mfspr r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm r0,r0,45-31,0xf;	/* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm r0,r0,45-31,0xe;	/* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi r0,0xa;		/* Hypervisor maintenance ? */		\
	bne 20f;							\
	/* Invoke opal call to handle hmi */				\
	ld r2,PACATOC(r13);						\
	ld r1,PACAR1(r13);						\
	std r3,ORIG_GPR3(r1);	/* Save original r3 */			\
	li r3,0;		/* NULL argument */			\
	bl hmi_exception_realmode;					\
	nop;								\
	ld r3,ORIG_GPR3(r1);	/* Restore original r3 */		\
20:	nop;

/*
 * r3 - The PSSCR value corresponding to the stop state.
 * r4 - The PSSCR mask corresponding to the stop state.
 */
_GLOBAL(power9_idle_stop)
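	/* Insert the requested fields under the mask: PSSCR = (PSSCR & ~r4) | r3 */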
	mfspr r5,SPRN_PSSCR
	andc r5,r5,r4
	or r3,r3,r5
	mtspr SPRN_PSSCR,r3
	LOAD_REG_ADDR(r5,power_enter_stop)
	li r4,1
	b pnv_powersave_common
	/* No return */

/*
 * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
 * HSPRG0 will be set to the HSPRG0 value of one of the
 * threads in this core. Thus the value we have in r13
 * may not be this thread's paca pointer.
 *
 * Fortunately, the TIR remains invariant. Since this thread's
 * paca pointer is recorded in all of its siblings' pacas, we can
 * correctly recover this thread's paca pointer if we
 * know the index of this thread in the core.
 *
 * This index can be obtained from the TIR.
 *
 * i.e., the thread's position in the core = TIR.
 * If this value is i, then this thread's paca is
 * paca->thread_sibling_pacas[i].
 */
power9_dd1_recover_paca:
	mfspr r4, SPRN_TIR
	/*
	 * Since each entry in thread_sibling_pacas is 8 bytes
	 * we need to left-shift by 3 bits. Thus r4 = i * 8
	 */
	sldi r4, r4, 3
	/* Get &paca->thread_sibling_pacas[0] in r5 */
	ld r5, PACA_SIBLING_PACA_PTRS(r13)
	/* Load paca->thread_sibling_pacas[i] into r13 */
	ldx r13, r4, r5
	SET_PACA(r13)
	ld r2, PACATOC(r13)
	/*
	 * Indicate that we have lost NVGPR state
	 * which needs to be restored from the stack.
	 */
	li r3, 1
	stb r3,PACA_NAPSTATELOST(r13)
	blr

/*
 * Called from reset vector. Check whether we have woken up with
 * hypervisor state loss. If yes, restore hypervisor state and return
 * to the reset vector.
 *
 * r13 - Contents of HSPRG0
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
_GLOBAL(pnv_restore_hyp_resource)
BEGIN_FTR_SECTION
BEGIN_FTR_SECTION_NESTED(70)
	mflr r6
	bl power9_dd1_recover_paca
	mtlr r6
FTR_SECTION_ELSE_NESTED(70)
	ld r2, PACATOC(r13)
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 70)
	/*
	 * Workaround for POWER9, if we lost resources, the ERAT
	 * might have been mixed up and needs flushing. We also need
	 * to reload MMCR0 (see comment above).
	 */
	blt cr3,1f
	PPC_INVALIDATE_ERAT
	ld r1,PACAR1(r13)
	ld r4,_MMCR0(r1)
	mtspr SPRN_MMCR0,r4
1:
	/*
	 * POWER ISA 3.0. Use PSSCR to determine if we
	 * are waking up from a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	mfspr r5,SPRN_PSSCR
	/*
	 * Bits 0-3 correspond to the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from.
	 */
	rldicl r5,r5,4,60
	cmpd cr4,r5,r4
	bge cr4,pnv_wakeup_tb_loss
	/*
	 * Waking up without hypervisor state loss. Return to
	 * reset vector
	 */
	blr

END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

/*
 * POWER ISA 2.07 or earlier.
 * Check if the last bit of HSPRG0 is set. This indicates whether we are
 * waking up from winkle.
 */
	clrldi r5,r13,63
	clrrdi r13,r13,1

	/* Now that we are sure r13 is corrected, load TOC */
	ld r2,PACATOC(r13);
	cmpwi cr4,r5,1
	mtspr SPRN_HSPRG0,r13

	lbz r0,PACA_THREAD_IDLE_STATE(r13)
	cmpwi cr2,r0,PNV_THREAD_NAP
	bgt cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt', since that
	 * indicates we are waking up with hypervisor state loss from nap.
	 */
	bgt cr3,.

	blr	/* Return to the system reset vector from where
		   pnv_restore_hyp_resource was invoked */

/*
 * Called if waking up from idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 */
_GLOBAL(pnv_wakeup_tb_loss)
	ld r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is
	 * lost, but not guaranteed: neither the ISA207 nor the ISA300 test
	 * used to reach here (the PACA_THREAD_IDLE_STATE test for ISA207,
	 * the PSSCR test for ISA300) is the same as the SRR1 test used to
	 * decide whether to restore NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li r0,1
	stb r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return to the reset vector after hypervisor state
	 * restore is complete.
	 */
	mflr r17
	mfspr r16,SPRN_SRR1
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	lbz r7,PACA_THREAD_MASK(r13)
	ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
	lwarx r15,0,r14
	andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
	/*
	 * The lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake-up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
	bnel core_idle_lock_held

	cmpwi cr2,r15,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

	ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx. r15,0,r14
	bne- lwarx_loop2
	isync

BEGIN_FTR_SECTION
	lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and r4,r4,r15
	cmpwi r4,0			/* Check if first in subcore */

	or r15,r15,r7			/* Set thread bit */
	beq first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or r15,r15,r7			/* Set thread bit */
	beq cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld r4,_SDR1(r1)
	mtspr SPRN_SDR1,r4

	ld r4,_RPR(r1)
	mtspr SPRN_RPR,r4
	ld r4,_AMOR(r1)
	mtspr SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch instruction below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching is performed during the
	 * discovery of idle states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least partial
	 * hypervisor state loss, to determine if a timebase resync is needed.
	 */
	ble cr3,clear_lock
	/* Time base re-sync */
	bl opal_resync_timebase;
	/*
	 * If waking up from sleep, per-core state is not lost; skip to
	 * clear_lock.
	 */
	blt cr4,clear_lock

	/*
	 * First thread in the core to wake up, and it is waking up with
	 * complete hypervisor state loss. Restore per-core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld r4,_PTCR(r1)
	mtspr SPRN_PTCR,r4
	ld r4,_RPR(r1)
	mtspr SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld r4,_TSCR(r1)
	mtspr SPRN_TSCR,r4
	ld r4,_WORC(r1)
	mtspr SPRN_WORC,r4

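/*
 * Drop the core_idle_state lock: keep only the thread bits and store
 * back, which also clears the lock bit.
 */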
clear_lock:
	andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld r8,PACA_SLBSHADOWPTR(r13)

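	/*
	 * Walk the bolted entries of the SLB shadow area: load each
	 * ESID/VSID pair (stored big-endian) and slbmte it back in if
	 * the valid bit is set.
	 */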
	.rept SLB_NUM_BOLTED
	li r3, SLBSHADOW_SAVEAREA
	LDX_BE r5, r8, r3
	addi r3, r3, 8
	LDX_BE r6, r8, r3
	andis. r7,r5,SLB_ESID_V@h
	beq 1f
	slbmte r6,r5
1:	addi r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld r4,_SPURR(r1)
	mtspr SPRN_SPURR,r4
	ld r4,_PURR(r1)
	mtspr SPRN_PURR,r4
	ld r4,_DSCR(r1)
	mtspr SPRN_DSCR,r4
	ld r4,_WORT(r1)
	mtspr SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld r4,0(r4)
	ld r12,CPU_SPEC_RESTORE(r4)
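	/* With ELF ABI v1, cpu_restore points to a function descriptor; load the actual entry point */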
#ifdef PPC64_ELF_ABI_v1
	ld r12,0(r12)
#endif
	mtctr r12
	bctrl

hypervisor_state_restored:

	mtspr SPRN_SRR1,r16
	mtlr r17
	blr	/* Return to the system reset vector from where
		   pnv_restore_hyp_resource was invoked */

fastsleep_workaround_at_exit:
	li r3,1
	li r4,0
	bl opal_config_cpu_idle_state
	b timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_loss)
	ld r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
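	/*
	 * Restore the register state saved by pnv_powersave_common and
	 * return to the saved NIP with the saved MSR via rfid.
	 */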
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld r6,_CCR(r1)
	ld r4,_MSR(r1)
	ld r5,_NIP(r1)
	addi r1,r1,INT_FRAME_SIZE
	mtcr r6
	mtspr SPRN_SRR1,r4
	mtspr SPRN_SRR0,r5
	rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_noloss)
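	/* If any state was actually lost while idle, take the full restore path */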
	lbz r0,PACA_NAPSTATELOST(r13)
	cmpwi r0,0
	bne pnv_wakeup_loss
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld r1,PACAR1(r13)
	ld r6,_CCR(r1)
	ld r4,_MSR(r1)
	ld r5,_NIP(r1)
	addi r1,r1,INT_FRAME_SIZE
	mtcr r6
	mtspr SPRN_SRR1,r4
	mtspr SPRN_SRR0,r5
	rfid