/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _PTCR	GPR12

#define PSSCR_HV_TEMPLATE	(PSSCR_ESL | PSSCR_EC | \
				 PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
				 PSSCR_MTL_MASK)
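
/*
 * PSSCR_HV_TEMPLATE sets ESL and EC and sets the PSLL, TR and MTL
 * fields to their full masks; power9_idle_stop below ORs the requested
 * stop level into this template before writing SPRN_PSSCR.
 */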

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note: all registers, i.e. per-core, per-subcore and per-thread,
	 * are saved here since any thread in the core might wake up first
	 */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
	 * SDR1 here
	 */
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9 - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	core_idle_lock_held
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested STOP state in POWER9
 *
 * To check IRQ_HAPPENED in r4
 *	0 - don't check
 *	1 - check
 *
 * Address to 'rfid' to in r5
 */
_GLOBAL(pnv_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/*
	 * NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* Hard disable interrupts */
	mfmsr	r9
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */
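	/*
	 * The rldicl/rotldi pair rotates MSR[EE] into bit 0, clears it,
	 * and rotates it back, so the mtmsrd above writes the old MSR
	 * with only EE cleared; r9 keeps that value for the saved _MSR.
	 */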

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
	beq	1f
	cmpwi	cr0,r4,0
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/* We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6
	mtmsrd	r6, 1			/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r5
	mtspr	SPRN_SRR1, r7
	rfid

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in the system    */
	/* reset interrupt vector in exceptions-64s.S.        */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
	lwarx	r15,0,r14

	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held

	andc	r15,r15,r7		/* Clear thread bit */

	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS

	/*
	 * If cr0 = 0, then the current thread is the last thread of the
	 * core entering sleep. The last thread needs to execute the
	 * hardware bug workaround code if required by the platform. The
	 * workaround call is made unconditionally here; the branch below
	 * is patched out during idle-state discovery if the platform
	 * does not require it.
	 */
	.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_rm_config_cpu_idle_state

	/* Clear Lock bit */
	li	r0,0
	lwsync
	stw	r0,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)

/*
 * r3 - requested stop state
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	/*
	 * Check if the requested state is a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	2f
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
2:
	/*
	 * Entering deep idle state.
	 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
	 * stack and enter stop
	 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held
	andc	r15,r15,r7		/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ(PPC_STOP)

_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr
	li	r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr	r4,r3
	li	r3,PNV_THREAD_NAP
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */
322 | ||
323 | _GLOBAL(power7_sleep) | |
7cba160a | 324 | li r3,PNV_THREAD_SLEEP |
c733cf83 | 325 | li r4,1 |
4eae2c9a | 326 | LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode) |
5fa6b6bd | 327 | b pnv_powersave_common |
aca79d2b | 328 | /* No return */ |
948cf67c | 329 | |
77b54e9f | 330 | _GLOBAL(power7_winkle) |
bfd1b7ae | 331 | li r3,PNV_THREAD_WINKLE |
77b54e9f | 332 | li r4,1 |
4eae2c9a | 333 | LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode) |
5fa6b6bd | 334 | b pnv_powersave_common |
77b54e9f SP |
335 | /* No return */ |
336 | ||

#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;

/*
 * r3 - requested stop state
 */
_GLOBAL(power9_idle_stop)
	LOAD_REG_IMMEDIATE(r4, PSSCR_HV_TEMPLATE)
	or	r4,r4,r3
	mtspr	SPRN_PSSCR, r4
	li	r4, 1
	LOAD_REG_ADDR(r5,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
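
/*
 * The requested stop level in r3 is ORed into the low bits of the
 * template, i.e. into the PSSCR RL (Requested Level) field; everything
 * else about the stop request comes from PSSCR_HV_TEMPLATE above.
 */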

/*
 * Called from reset vector. Check whether we have woken up with
 * hypervisor state loss. If yes, restore hypervisor state and return
 * back to reset vector.
 *
 * r13 - Contents of HSPRG0
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
_GLOBAL(pnv_restore_hyp_resource)
BEGIN_FTR_SECTION
	ld	r2,PACATOC(r13);
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from deep idle state
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	mfspr	r5,SPRN_PSSCR
	/*
	 * Bits 0-3 of PSSCR hold the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from
	 */
	rldicl	r5,r5,4,60
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss
	/*
	 * Waking up without hypervisor state loss. Return to
	 * reset vector
	 */
	blr

END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
401 | ||
402 | /* | |
403 | * POWER ISA 2.07 or less. | |
17065671 SP |
404 | * Check if last bit of HSPGR0 is set. This indicates whether we are |
405 | * waking up from winkle. | |
406 | */ | |
407 | clrldi r5,r13,63 | |
408 | clrrdi r13,r13,1 | |
e325d76f MS |
409 | |
410 | /* Now that we are sure r13 is corrected, load TOC */ | |
411 | ld r2,PACATOC(r13); | |
17065671 SP |
412 | cmpwi cr4,r5,1 |
413 | mtspr SPRN_HSPRG0,r13 | |
414 | ||
415 | lbz r0,PACA_THREAD_IDLE_STATE(r13) | |
416 | cmpwi cr2,r0,PNV_THREAD_NAP | |
5fa6b6bd | 417 | bgt cr2,pnv_wakeup_tb_loss /* Either sleep or Winkle */ |
17065671 SP |
418 | |
419 | /* | |
420 | * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking | |
421 | * up from nap. At this stage CR3 shouldn't contains 'gt' since that | |
422 | * indicates we are waking with hypervisor state loss from nap. | |
423 | */ | |
424 | bgt cr3,. | |
425 | ||
426 | blr /* Return back to System Reset vector from where | |
5fa6b6bd | 427 | pnv_restore_hyp_resource was invoked */ |
17065671 | 428 | |
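
/*
 * Why the low bit of HSPRG0 works as a winkle marker: HSPRG0 holds the
 * PACA pointer, whose low bits are always zero, and (as set up in the
 * platform idle init code) the winkle state-restore image brings
 * HSPRG0 back with bit 63 set. The clrldi/clrrdi pair above extracts
 * the flag and recovers the clean PACA pointer.
 */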

/*
 * Called if waking up from an idle state which can cause either partial
 * or complete hypervisor state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 */
_GLOBAL(pnv_wakeup_tb_loss)
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack
	 * and they are restored before switching to the process context. Hence
	 * until they are restored, they are free to be used.
	 *
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mflr	r17
	mfspr	r16,SPRN_SRR1
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	/*
	 * Lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
	bnel	core_idle_lock_held

	cmpwi	cr2,r15,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync

BEGIN_FTR_SECTION
	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0			/* Check if first in subcore */

	or	r15,r15,r7		/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7		/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch below will be patched
	 * out if the platform does not have fastsleep or does not require
	 * the workaround. Patching is performed during the discovery of
	 * idle states.
	 */
	.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least partial
	 * hypervisor state loss, to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,clear_lock
	/* Time base re-sync */
	bl	opal_rm_resync_timebase;
	/*
	 * If waking up from sleep, per core state is not lost, skip to
	 * clear_lock.
	 */
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up, and it is waking up with
	 * complete hypervisor state loss. Restore per-core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
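	/*
	 * The loop above walks the bolted entries of the SLB shadow
	 * buffer (esid/vsid pairs, loaded big-endian via LDX_BE) and
	 * reinserts only entries with the valid bit (SLB_ESID_V) set,
	 * since winkle loses SLB contents. Radix hosts skip this.
	 */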
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

hypervisor_state_restored:

	mtspr	SPRN_SRR1,r16
	mtlr	r17
	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_rm_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_loss)
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_noloss)
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid
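
/*
 * pnv_wakeup_noloss is the fast path: if PACA_NAPSTATELOST is still 0
 * (the nap resumed without GPR loss), the NVGPR restore is skipped and
 * only CR/MSR/NIP are reloaded from the frame before the rfid back to
 * the pnv_powersave_common caller; otherwise it falls back to
 * pnv_wakeup_loss.
 */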