/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)				\
	mr	reg, r13

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)				\
	tophys(reg, r2);				\
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
	tophys(reg, reg)

#endif
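
/*
 * Note: on 64-bit the shadow vcpu state is embedded in the PACA, which
 * r13 points to in real mode, so no load is needed. On 32-bit, r2 holds
 * the current task_struct and the shadow vcpu pointer is fetched from
 * its thread struct; tophys() converts to physical addresses because
 * the MMU is off here.
 */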

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R1 = host R1
	 * R2 = host R2
	 * R4 = guest shadow MSR
	 * R5 = normal host MSR
	 * R6 = current host MSR (EE, IR, DR off)
	 * LR = highmem guest exit code
	 * all other volatile GPRS = free
	 * SVCPU[CR] = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR] = guest LR
	 */

	/* r3 = shadow vcpu */
	GET_SHADOW_VCPU(r3)

	/* Save guest exit handler address and MSR */
	mflr	r0
	PPC_STL	r0, HSTATE_VMHANDLER(r3)
	PPC_STL	r5, HSTATE_HOST_MSR(r3)

	/* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
	PPC_STL	r1, HSTATE_HOST_R1(r3)
	PPC_STL	r2, HSTATE_HOST_R2(r3)

	/* Activate guest mode, so faults get handled by KVM */
	li	r11, KVM_GUEST_MODE_GUEST
	stb	r11, HSTATE_IN_GUEST(r3)

	/* Switch to guest segment. This is subarch specific. */
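	/*
	 * LOAD_GUEST_SEGMENTS is provided by the subarch include above:
	 * the SLB switch in book3s_64_slb.S on 64-bit, the segment
	 * register reload in book3s_32_sr.S on 32-bit.
	 */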
	LOAD_GUEST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Save host FSCR */
	mfspr	r8, SPRN_FSCR
	std	r8, HSTATE_HOST_FSCR(r13)
	/* Set FSCR during guest execution */
	ld	r9, SVCPU_SHADOW_FSCR(r13)
	mtspr	SPRN_FSCR, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
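	/*
	 * Note: FSCR exists on ISA 2.07 (POWER8) and later and gates
	 * facilities such as TAR and EBB; running on the guest's shadow
	 * FSCR lets facility unavailable interrupts be reflected properly.
	 */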

	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */
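	/*
	 * HSTATE_RESTORE_HID5 should only be set on CPUs (e.g. PPC970)
	 * whose HID5 has a dcbz32 control bit; with it set, dcbz clears
	 * 32 bytes as 32-bit guests expect rather than a full cache line.
	 */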
	lbz	r0, HSTATE_RESTORE_HID5(r3)
	cmpwi	r0, 0
	beq	no_dcbz32_on

	mfspr	r0, SPRN_HID5
	ori	r0, r0, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5, r0
no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* Enter guest */

	PPC_LL	r8, SVCPU_CTR(r3)
	PPC_LL	r9, SVCPU_LR(r3)
	lwz	r10, SVCPU_CR(r3)
	PPC_LL	r11, SVCPU_XER(r3)

	mtctr	r8
	mtlr	r9
	mtcr	r10
	mtxer	r11

	/* Move SRR0 and SRR1 into the respective regs */
	PPC_LL	r9, SVCPU_PC(r3)
	/* First clear RI in our current MSR value */
	li	r0, MSR_RI
	andc	r6, r6, r0
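	/*
	 * With RI clear, any interrupt taken from here until the rfi is
	 * unrecoverable: SRR0/SRR1 are about to be loaded with guest
	 * state and must not be clobbered.
	 */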

	PPC_LL	r0, SVCPU_R0(r3)
	PPC_LL	r1, SVCPU_R1(r3)
	PPC_LL	r2, SVCPU_R2(r3)
	PPC_LL	r5, SVCPU_R5(r3)
	PPC_LL	r7, SVCPU_R7(r3)
	PPC_LL	r8, SVCPU_R8(r3)
	PPC_LL	r10, SVCPU_R10(r3)
	PPC_LL	r11, SVCPU_R11(r3)
	PPC_LL	r12, SVCPU_R12(r3)
	PPC_LL	r13, SVCPU_R13(r3)

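	/*
	 * MTMSR_EERI expands to mtmsrd L=1 on 64-bit (plain mtmsr on
	 * 32-bit), which updates only MSR[EE] and MSR[RI]; translation
	 * stays off until the rfi below switches to the guest MSR.
	 */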
	MTMSR_EERI(r6)
	mtsrr0	r9
	mtsrr1	r4

	PPC_LL	r4, SVCPU_R4(r3)
	PPC_LL	r6, SVCPU_R6(r3)
	PPC_LL	r9, SVCPU_R9(r3)
	PPC_LL	r3, (SVCPU_R3)(r3)

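	/*
	 * RFI_TO_GUEST is rfi/rfid plus, where the RFI flush mitigation
	 * is enabled, the patched-in L1D flush sequence.
	 */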
	RFI_TO_GUEST
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_interrupt_pr
kvmppc_interrupt_pr:
	/* 64-bit entry. Register usage at this point:
	 *
	 * SPRG_SCRATCH0   = guest R13
	 * R12             = (guest CR << 32) | exit handler id
	 * R13             = PACA
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CTR if RELOCATABLE
	 */
#ifdef CONFIG_PPC64
	/* Match 32-bit entry */
#ifdef CONFIG_RELOCATABLE
	std	r9, HSTATE_SCRATCH2(r13)
	ld	r9, HSTATE_SCRATCH1(r13)
	mtctr	r9
	ld	r9, HSTATE_SCRATCH2(r13)
#endif
	rotldi	r12, r12, 32		   /* Flip R12 halves for stw */
	stw	r12, HSTATE_SCRATCH1(r13)  /* CR is now in the low half */
	srdi	r12, r12, 32		   /* shift trap into low half */
#endif
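	/*
	 * The common 64-bit interrupt entry packs the guest CR into the
	 * upper half of R12 so one register carries both CR and trap
	 * number; the rotldi/stw/srdi above unpacks that into
	 * HSTATE.SCRATCH1 and R12, the layout the 32-bit entry provides.
	 */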

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0   = guest R13
	 * R12             = exit handler id
	 * R13             = shadow vcpu (32-bit) or PACA (64-bit)
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CR
	 */

	/* Save registers */

	PPC_STL	r0, SVCPU_R0(r13)
	PPC_STL	r1, SVCPU_R1(r13)
	PPC_STL	r2, SVCPU_R2(r13)
	PPC_STL	r3, SVCPU_R3(r13)
	PPC_STL	r4, SVCPU_R4(r13)
	PPC_STL	r5, SVCPU_R5(r13)
	PPC_STL	r6, SVCPU_R6(r13)
	PPC_STL	r7, SVCPU_R7(r13)
	PPC_STL	r8, SVCPU_R8(r13)
	PPC_STL	r9, SVCPU_R9(r13)
	PPC_STL	r10, SVCPU_R10(r13)
	PPC_STL	r11, SVCPU_R11(r13)

	/* Restore R1/R2 so we can handle faults */
	PPC_LL	r1, HSTATE_HOST_R1(r13)
	PPC_LL	r2, HSTATE_HOST_R2(r13)

	/* Save guest PC and MSR */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	andi.	r0, r12, 0x2
	cmpwi	cr1, r0, 0
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	andi.	r12, r12, 0x3ffd
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	mfsrr0	r3
	mfsrr1	r4
2:
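	/*
	 * Hypervisor interrupts (trap numbers with bit 0x2 set) arrive via
	 * HSRR0/HSRR1 on HV-capable CPUs; the andi. with 0x3ffd folds them
	 * back onto the generic trap number, while cr1 remembers that the
	 * exit path below should also prime the HSRRs before returning.
	 */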
	PPC_STL	r3, SVCPU_PC(r13)
	PPC_STL	r4, SVCPU_SHADOW_SRR1(r13)

	/* Get scratch'ed off registers */
	GET_SCRATCH0(r9)
	PPC_LL	r8, HSTATE_SCRATCH0(r13)
	lwz	r7, HSTATE_SCRATCH1(r13)

	PPC_STL	r9, SVCPU_R13(r13)
	PPC_STL	r8, SVCPU_R12(r13)
	stw	r7, SVCPU_CR(r13)

	/* Save more register state */

	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8
	mflr	r9

	PPC_STL	r5, SVCPU_XER(r13)
	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
	stw	r7, SVCPU_FAULT_DSISR(r13)
	PPC_STL	r8, SVCPU_CTR(r13)
	PPC_STL	r9, SVCPU_LR(r13)

	/*
	 * To easily fetch the instruction we got the #vmexit at,
	 * we exploit the fact that the guest's virtual layout is
	 * still mapped the same here, so we can simply load from
	 * the guest's PC address.
	 */

	/* We only load the last instruction when it's safe */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_SYSCALL
	beq	ld_last_prev_inst
	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
	beq-	ld_last_inst
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

	b	no_ld_last_inst

ld_last_prev_inst:
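	/* On a syscall exit SRR0 already points past the sc instruction,
	 * so step back one word to fetch the sc itself. */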
	addi	r3, r3, -4

ld_last_inst:
	/* Save off the guest instruction we're at */

	/* In case lwz faults */
	li	r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r9, KVM_GUEST_MODE_SKIP
	stb	r9, HSTATE_IN_GUEST(r13)

	/* 1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR		/* Enable paging for data */
	mtmsr	r11
	sync
	/* 2) fetch the instruction */
	lwz	r0, 0(r3)
	/* 3) disable paging again */
	mtmsr	r9
	sync

#endif
	stw	r0, SVCPU_LAST_INST(r13)
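	/*
	 * If the lwz above faulted, the fault handler saw
	 * KVM_GUEST_MODE_SKIP and resumed at the following instruction,
	 * leaving KVM_INST_FETCH_FAILED in r0 to be stored here.
	 */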

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Switch back to host MMU */
	LOAD_HOST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64

	lbz	r5, HSTATE_RESTORE_HID5(r13)
	cmpwi	r5, 0
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5, SPRN_HID5
	rldimi	r5, r4, 6, 56
	mtspr	SPRN_HID5, r5

no_dcbz32_off:

BEGIN_FTR_SECTION
	/* Save guest FSCR on a FAC_UNAVAIL interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	bne+	no_fscr_save
	mfspr	r7, SPRN_FSCR
	std	r7, SVCPU_SHADOW_FSCR(r13)
no_fscr_save:
	/* Restore host FSCR */
	ld	r8, HSTATE_HOST_FSCR(r13)
	mtspr	SPRN_FSCR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Having set up SRR0/1 with the address where we want
	 * to continue with relocation on (potentially in module
	 * space), we either just go straight there with rfi[d],
	 * or we jump to an interrupt handler if there is an
	 * interrupt to be handled first. In the latter case,
	 * the rfi[d] at the end of the interrupt handler will
	 * get us back to where we want to continue.
	 */

	/* Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R10      = raw exit handler id
	 * R12      = exit handler id
	 * R13      = shadow vcpu (32-bit) or PACA (64-bit)
	 * SVCPU.*  = guest *
	 *
	 */

	PPC_LL	r6, HSTATE_HOST_MSR(r13)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * We don't want to change MSR[TS] bits via rfi here.
	 * The actual TM handling logic runs in the host, with
	 * DR/IR recovered, after HSTATE_VMHANDLER. Since MSR_TM
	 * can be set in HOST_MSR, rfid may not suppress a TS
	 * change and could raise an exception. Manually carry
	 * the TS bits over to prevent a state change here.
	 */
	mfmsr	r7
	rldicl	r7, r7, 64 - MSR_TS_S_LG, 62
	rldimi	r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG
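	/*
	 * rldicl extracts the two MSR[TS] bits from the live MSR into the
	 * low bits of r7; rldimi then splices them into the host MSR image
	 * in r6, so the rfi below preserves the current transaction state
	 * instead of forcing a change.
	 */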
#endif
	PPC_LL	r8, HSTATE_VMHANDLER(r13)

#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	beq	cr1, 1f
	mtspr	SPRN_HSRR1, r6
	mtspr	SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	/* Restore host msr -> SRR1 */
	mtsrr1	r6
	/* Load highmem handler address */
	mtsrr0	r8

	/* RFI into the highmem handler, or jump to interrupt handler */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beqa	BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beqa	BOOK3S_INTERRUPT_DECREMENTER
	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
	beqa	BOOK3S_INTERRUPT_PERFMON
	cmpwi	r12, BOOK3S_INTERRUPT_DOORBELL
	beqa	BOOK3S_INTERRUPT_DOORBELL
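	/*
	 * beqa branches to an absolute address, and the BOOK3S_INTERRUPT_*
	 * values are the architected vector offsets (e.g. 0x500 for
	 * external), so each beqa jumps straight to the host's exception
	 * vector as if the interrupt had arrived while in the kernel.
	 */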

	RFI_TO_KERNEL
kvmppc_handler_trampoline_exit_end: