/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _PTCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	(PSSCR_EC | PSSCR_ESL) >> 16
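/*
 * PSSCR_EC and PSSCR_ESL sit in the upper 16 bits of the low word of the
 * PSSCR, so the mask is pre-shifted right by 16 here to make it usable as
 * an "andis." immediate (andis. shifts its 16-bit immediate back up by 16).
 */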

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note that all registers (per-core, per-subcore and per-thread)
	 * are saved here, since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
	 * SDR1 here
	 */
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

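/*
 * Roughly equivalent C for the spin below (an illustrative sketch only):
 *
 *	while (READ_ONCE(*core_idle_state) & PNV_CORE_IDLE_LOCK_BIT)
 *		HMT_low();
 *	HMT_medium();
 *	// reload with lwarx and retry; the final lwarx leaves a
 *	// reservation in place for the caller's stwcx.
 */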
core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested STOP state in POWER9
 *
 * To check IRQ_HAPPENED in r4
 *	0 - don't check
 *	1 - check
 *
 * Address to 'rfid' to in r5
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* Hard disable interrupts */
	mfmsr	r9
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */
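	/*
	 * The rldicl/rotldi pair above rotates MSR_EE up to the top bit,
	 * clears it, and rotates the value back, i.e. r9 = MSR & ~MSR_EE;
	 * r9 is stashed below as the _MSR value to restore on wakeup.
	 */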

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
	beq	1f
	cmpwi	cr0,r4,0
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/* We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6
	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r5
	mtspr	SPRN_SRR1, r7
	rfid

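/*
 * Note: the rfid above branches to the entry point passed in r5 with
 * MSR set to MSR_IDLE, i.e. with translation (IR/DR) off, which is why
 * the hwthread_state updates below must happen in real mode. The exact
 * MSR_IDLE composition is per asm/reg.h and may vary by kernel version.
 */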
	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                  */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                               */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5			/* Add if winkle */
	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

	/*
	 * If cr0 = 0, then current thread is the last thread of the core entering
	 * sleep. Last thread needs to execute the hardware bug workaround code if
	 * required by the platform.
	 * Make the workaround call unconditionally here. The below branch call is
	 * patched out when the idle states are discovered if the platform does not
	 * require it.
	 */
	.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

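/*
 * Illustrative C sketch of the core_idle_state update performed in
 * lwarx_loop1 above (the whole update is effectively atomic thanks to
 * the lwarx/stwcx. pair and the lock bit):
 *
 *	do {
 *		s = *core_idle_state;
 *		if (s & PNV_CORE_IDLE_LOCK_BIT)
 *			spin_until_unlocked();	// core_idle_lock_held
 *		if (winkle)
 *			s += PNV_CORE_IDLE_WINKLE_COUNT;
 *		s &= ~thread_mask;
 *		last_thread = !(s & PNV_CORE_IDLE_THREAD_BITS);
 *	} while (!store_conditional(core_idle_state, s));
 *
 * spin_until_unlocked() and store_conditional() are made-up helper names
 * standing in for the HMT_LOW spin and the stwcx. retry.
 */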
common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)

/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	/*
	 * Check if we are executing the lite variant with ESL=EC=0
	 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60	/* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
	li	r3,0		/* Since we didn't lose state, return 0 */
	b	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
	/*
	 * Check if the requested state is a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
.Lhandle_deep_stop:
	/*
	 * Entering deep idle state.
	 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
	 * stack and enter stop
	 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)

_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr
	li	r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr	r4,r3
	li	r3,PNV_THREAD_NAP
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_sleep)
	li	r3,PNV_THREAD_SLEEP
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_winkle)
	li	r3,PNV_THREAD_WINKLE
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;

/*
 * r3 - The PSSCR value corresponding to the stop state.
 * r4 - The PSSCR mask corresponding to the stop state.
 */
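/*
 * A rough C equivalent of the PSSCR update below (illustrative only):
 *
 *	psscr = (mfspr(SPRN_PSSCR) & ~mask) | val;
 *	mtspr(SPRN_PSSCR, psscr);
 *
 * i.e. only the fields selected by the mask take the new value; all
 * other fields keep their current contents.
 */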
_GLOBAL(power9_idle_stop)
	mfspr	r5,SPRN_PSSCR
	andc	r5,r5,r4
	or	r3,r3,r5
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r5,power_enter_stop)
	li	r4,1
	b	pnv_powersave_common
	/* No return */

/*
 * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
 * HSPRG0 will be set to the HSPRG0 value of one of the
 * threads in this core. Thus the value we have in r13
 * may not be this thread's paca pointer.
 *
 * Fortunately, the TIR remains invariant. Since this thread's
 * paca pointer is recorded in all its siblings' pacas, we can
 * correctly recover this thread's paca pointer if we
 * know the index of this thread in the core.
 *
 * This index can be obtained from the TIR.
 *
 * i.e, thread's position in the core = TIR.
 * If this value is i, then this thread's paca is
 * paca->thread_sibling_pacas[i].
 */
power9_dd1_recover_paca:
	mfspr	r4, SPRN_TIR
	/*
	 * Since each entry in thread_sibling_pacas is 8 bytes
	 * we need to left-shift by 3 bits. Thus r4 = i * 8
	 */
	sldi	r4, r4, 3
	/* Get &paca->thread_sibling_pacas[0] in r5 */
	ld	r5, PACA_SIBLING_PACA_PTRS(r13)
	/* Load paca->thread_sibling_pacas[i] into r13 */
	ldx	r13, r4, r5
	SET_PACA(r13)
	ld	r2, PACATOC(r13)
	/*
	 * Indicate that we have lost NVGPR state
	 * which needs to be restored from the stack.
	 */
	li	r3, 1
	stb	r3,PACA_NAPSTATELOST(r13)
	blr

/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31
	cmpwi	cr3,r11,2
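	/*
	 * The rlwinm above leaves SRR1[46:47] (the power-saving state-loss
	 * field) in r11; comparing it against 2 sets cr3 the same way the
	 * system reset wakeup path does, i.e. gt when hypervisor state was
	 * lost (see the cr3 convention documented below).
	 */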

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into SRR1, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r3,r3,SRR1_WAKEMCE_RESVD@h
	mtspr	SPRN_SRR1,r3

	b	pnv_powersave_wakeup

/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
BEGIN_FTR_SECTION
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	mfspr	r3,SPRN_SRR1
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss

/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return back to link.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from deep idle state
	 */
BEGIN_FTR_SECTION
	mflr	r6
	bl	power9_dd1_recover_paca
	mtlr	r6
FTR_SECTION_ELSE
	ld	r2, PACATOC(r13)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_POWER9_DD1)

	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	mfspr	r5,SPRN_PSSCR
	/*
	 * Bits 0-3 of the PSSCR correspond to the Power-Saving Level Status
	 * (PLS), which indicates the idle state we are waking up from.
	 */
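	/*
	 * Equivalently (illustrative C): pls = psscr >> 60; the wakeup was
	 * from a deep state iff pls >= pnv_first_deep_stop_state. The
	 * rldicl below is just that 4-bit field extraction.
	 */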
	rldicl	r5,r5,4,60
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss	/* returns to caller */

	blr	/* Waking up without hypervisor state loss. */

/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or less.
	 * Check if we slept with sleep or winkle.
	 */
	ld	r2,PACATOC(r13);

	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt', since that
	 * would indicate we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Waking up without hypervisor state loss */

/*
 * Called if waking up from idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack
	 * and they are restored before switching to the process context. Hence
	 * until they are restored, they are free to be used.
	 *
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mr	r18,r4
	mflr	r17
	mfspr	r16,SPRN_SRR1
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * Lock bit is set in one of the 2 cases-
	 * a. In the sleep/winkle enter path, the last thread is executing
	 * fastsleep workaround code.
	 * b. In the wake up path, another thread is executing fastsleep
	 * workaround undo code or resyncing timebase or restoring context
	 * In either case loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wakeup in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core
	 * }
	 *
	 * sleep_wake()
	 * {
	 *	bool first_in_core, first_in_subcore;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *	bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 *
	 *	if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *		core_idle_state |= THREAD_WINKLE_BITS;
	 *	core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *	winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *	core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 *
	 */
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/* Shift thread bit to winkle mask, then test if this thread is set,
	 * and remove it from the winkle bits */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7		/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7		/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The below branch instruction will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
	.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3 which indicates that we are waking up with at least partial
	 * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,clear_lock
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep, per core state is not lost, skip to
	 * clear_lock.
	 */
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)

756 | ||
757 | common_exit: | |
77b54e9f SP |
758 | /* |
759 | * Common to all threads. | |
760 | * | |
761 | * If waking up from sleep, hypervisor state is not lost. Hence | |
762 | * skip hypervisor state restore. | |
763 | */ | |
bd00a240 | 764 | blt cr4,hypervisor_state_restored |
77b54e9f SP |
765 | |
766 | /* Waking up from winkle */ | |
767 | ||
bcef83a0 SP |
768 | BEGIN_MMU_FTR_SECTION |
769 | b no_segments | |
5a25b6f5 | 770 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) |
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
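	/*
	 * Roughly equivalent C for the .rept loop above (sketch only):
	 *
	 *	struct slb_shadow *p = get_paca()->slb_shadow_ptr;
	 *	for (i = 0; i < SLB_NUM_BOLTED; i++) {
	 *		esid = be64_to_cpu(p->save_area[i].esid);
	 *		vsid = be64_to_cpu(p->save_area[i].vsid);
	 *		if (esid & SLB_ESID_V)
	 *			slbmte(vsid, esid);
	 *	}
	 *
	 * slbmte() here stands for the raw instruction, not a kernel helper.
	 */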
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl
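	/*
	 * The extra load under PPC64_ELF_ABI_v1 dereferences the function
	 * descriptor: on ELFv1, cpu_restore points at an OPD entry whose
	 * first doubleword is the actual entry address, whereas on ELFv2
	 * the pointer is the entry address itself.
	 */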

hypervisor_state_restored:

	mtspr	SPRN_SRR1,r16
	mtlr	r17
	blr		/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid