/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _PTCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	(PSSCR_EC | PSSCR_ESL) >> 16
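/*
 * The mask is pre-shifted right by 16 so it can be used directly as the
 * immediate of the "andis." test in power_enter_stop below: "andis."
 * applies its 16-bit immediate shifted left by 16, which lands it back
 * on the PSSCR_EC/PSSCR_ESL bits.
 */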

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note: all registers, i.e. per-core, per-subcore and per-thread,
	 * are saved here, since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not saving
	 * SDR1 here.
	 */
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

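/*
 * Grouping of the SPRs saved above, as implied by the restore paths at
 * the end of this file: SDR1, RPR and AMOR are restored per subcore
 * (ISA v2.07); PTCR and RPR (ISA v3.00), TSCR and WORC are restored per
 * core; SPURR, PURR, DSCR and WORT are restored per thread.
 */
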
/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	core_idle_lock_held
	blr

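/*
 * Roughly, in illustrative C-like pseudocode (not actual kernel code):
 *
 *	do {
 *		while (*core_idle_state & PNV_CORE_IDLE_LOCK_BIT)
 *			;				// spin at HMT_LOW
 *		val = load_reserved(core_idle_state);	// lwarx
 *	} while (val & PNV_CORE_IDLE_LOCK_BIT);
 *
 * Callers reach this with "bnel" from inside their own lwarx/stwcx.
 * loops, so on return r15 holds a freshly reserved copy of
 * core_idle_state with the lock bit observed clear, and the caller's
 * stwcx. proceeds against that reservation.
 */
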
/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested STOP state in POWER9
 *
 * Whether to check IRQ_HAPPENED is passed in r4:
 *	0 - don't check
 *	1 - check
 *
 * The address to 'rfid' to is passed in r5.
 */
_GLOBAL(pnv_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss; we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* Hard disable interrupts */
	mfmsr	r9
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
	beq	1f
	cmpwi	cr0,r4,0
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/* We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6
	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r5
	mtspr	SPRN_SRR1, r7
	rfid

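/*
 * A rough summary of pnv_powersave_common above, in illustrative C-like
 * pseudocode (not actual kernel code):
 *
 *	if ((paca->irq_happened & ~PACA_IRQ_HARD_DIS) && check_irq)
 *		return 0;			// something pending, don't idle
 *	paca->irq_happened = PACA_IRQ_HARD_DIS;
 *	paca->nap_state_lost = 0;
 *	save r2, NVGPRs, CR and MSR in the stack frame; paca->r1 = r1;
 *	rfid to the entry point in r5 with MSR_IDLE,
 *	i.e. switch to real mode before hwthread_state is touched.
 */
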
	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in the system    */
	/* reset interrupt vector in exceptions-64s.S.        */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
	lwarx	r15,0,r14

	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held

	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then the current thread is the last thread of the core
 * entering sleep. The last thread needs to execute the hardware bug
 * workaround code if the platform requires it.
 * Make the workaround call unconditionally here. The branch below is
 * patched out during idle-state discovery if the platform does not
 * require the workaround.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Clear Lock bit */
	li	r0,0
	lwsync
	stw	r0,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
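
/*
 * The sleep/winkle entry above maintains core_idle_state roughly as
 * follows (illustrative C-like pseudocode, not actual kernel code):
 *
 *	again:
 *		s = load_reserved(core_idle_state);		// lwarx
 *		if (s & PNV_CORE_IDLE_LOCK_BIT)
 *			wait_for_unlock();		// core_idle_lock_held
 *		s = (s & ~thread_mask) & PNV_CORE_IDLE_THREAD_BITS;
 *		if (s == 0) {		// last thread in the core; this
 *					// branch may be patched out
 *			if (!store_conditional(s | LOCK_BIT))	// stwcx.
 *				goto again;
 *			opal_config_cpu_idle_state(1, 1);   // apply workaround
 *			*core_idle_state = 0;		    // lwsync; stw
 *		} else if (!store_conditional(s))		// stwcx.
 *			goto again;
 *
 * then execute PPC_SLEEP, or save SPRs and execute PPC_WINKLE when cr3
 * says the request was winkle.
 */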

/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
 * Check if we are executing the lite variant with ESL=EC=0
 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
	li	r3,0  /* Since we didn't lose state, return 0 */
	b	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
/*
 * Check if the requested state is a deep idle state.
 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
.Lhandle_deep_stop:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)

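/*
 * The stop entry decision above, roughly (illustrative C-like pseudocode,
 * not actual kernel code):
 *
 *	if (!(psscr & (PSSCR_EC | PSSCR_ESL))) {	// "lite" stop
 *		stop();			// no state loss, resumes here
 *		return 0;		// via pnv_wakeup_noloss
 *	}
 *	rl = psscr & 0xf;		// requested level
 *	if (rl < pnv_first_deep_stop_state)
 *		stop();			// shallow stop, wakes in reset vector
 *	else {
 *		clear this thread's bit in core_idle_state;
 *		save_sprs_to_stack();
 *		stop();			// deep stop, wakes in reset vector
 *	}
 */
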
_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr
	li	r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr	r4,r3
	li	r3,PNV_THREAD_NAP
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_sleep)
	li	r3,PNV_THREAD_SLEEP
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_winkle)
	li	r3,PNV_THREAD_WINKLE
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;

/*
 * r3 - The PSSCR value corresponding to the stop state.
 * r4 - The PSSCR mask corresponding to the stop state.
 */
_GLOBAL(power9_idle_stop)
	mfspr	r5,SPRN_PSSCR
	andc	r5,r5,r4
	or	r3,r3,r5
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r5,power_enter_stop)
	li	r4,1
	b	pnv_powersave_common
	/* No return */
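
/*
 * The PSSCR update above is, in effect (illustrative C-like pseudocode,
 * not actual kernel code):
 *
 *	psscr = (mfspr(SPRN_PSSCR) & ~mask) | value;	// r4 = mask, r3 = value
 *	mtspr(SPRN_PSSCR, psscr);
 *
 * after which the common entry path is taken with r5 = power_enter_stop
 * and r4 = 1 (do check IRQ_HAPPENED).
 */
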
/*
 * Called from reset vector. Check whether we have woken up with
 * hypervisor state loss. If yes, restore hypervisor state and return
 * back to reset vector.
 *
 * r13 - Contents of HSPRG0
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
_GLOBAL(pnv_restore_hyp_resource)
BEGIN_FTR_SECTION
	ld	r2,PACATOC(r13);
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from deep idle state
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	mfspr	r5,SPRN_PSSCR
	/*
	 * Bits 0-3 correspond to the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from
	 */
	rldicl	r5,r5,4,60
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss
	/*
	 * Waking up without hypervisor state loss. Return to
	 * reset vector
	 */
	blr

END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/*
	 * POWER ISA 2.07 or less.
	 * Check if the last bit of HSPRG0 is set. This indicates whether we
	 * are waking up from winkle.
	 */
	clrldi	r5,r13,63
	clrrdi	r13,r13,1

	/* Now that we are sure r13 is corrected, load TOC */
	ld	r2,PACATOC(r13);
	cmpwi	cr4,r5,1
	mtspr	SPRN_HSPRG0,r13

	lbz	r0,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r0,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt', since that
	 * would indicate we are waking up with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

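/*
 * Summary of the wakeup checks above (illustrative pseudocode, not
 * actual kernel code):
 *
 *	ISA v3.00: if (PSSCR.PLS >= pnv_first_deep_stop_state)
 *			goto pnv_wakeup_tb_loss;	// hyp state may be lost
 *		   else return;				// nothing to restore
 *
 *	ISA v2.07: bit 63 of HSPRG0 flags a winkle wakeup (tested into cr4);
 *		   PACA_THREAD_IDLE_STATE > PNV_THREAD_NAP means sleep or
 *		   winkle, so goto pnv_wakeup_tb_loss; a plain nap wakeup
 *		   just returns to the reset vector.
 */
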
/*
 * Called if waking up from idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 */
_GLOBAL(pnv_wakeup_tb_loss)
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved on the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is
	 * lost, but not guaranteed -- neither the ISA207 nor the ISA300 test
	 * to reach here is the same as the test to restore NVGPRs:
	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
	 * and SRR1 test for restoring NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mflr	r17
	mfspr	r16,SPRN_SRR1
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	/*
	 * The lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wakeup path, another thread is executing the fastsleep
	 *    workaround undo code, or resyncing the timebase, or restoring
	 *    context.
	 * In either case loop until the lock bit is cleared.
	 */
	bnel	core_idle_lock_held

	cmpwi	cr2,r15,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in the core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync

BEGIN_FTR_SECTION
	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7		/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7		/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least
	 * partial hypervisor state loss, to determine if a TIMEBASE RESYNC
	 * is needed.
	 */
	ble	cr3,clear_lock
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep, per core state is not lost, skip to
	 * clear_lock.
	 */
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
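
/*
 * The loop above walks the bolted entries of the SLB shadow buffer,
 * roughly (illustrative pseudocode; field names are for illustration
 * only):
 *
 *	for (i = 0; i < SLB_NUM_BOLTED; i++) {
 *		esid = be64_to_cpu(slb_shadow->save_area[i].esid);
 *		vsid = be64_to_cpu(slb_shadow->save_area[i].vsid);
 *		if (esid & SLB_ESID_V)
 *			slbmte(vsid, esid);
 *	}
 */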
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

hypervisor_state_restored:

	mtspr	SPRN_SRR1,r16
	mtlr	r17
	blr		/* Return back to System Reset vector from where
			   pnv_restore_hyp_resource was invoked */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_loss)
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_noloss)
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid