/* KVM PPC Book3S: real-mode guest entry/exit trampolines (PR KVM) */
1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | |
3 | * it under the terms of the GNU General Public License, version 2, as | |
4 | * published by the Free Software Foundation. | |
5 | * | |
6 | * This program is distributed in the hope that it will be useful, | |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
9 | * GNU General Public License for more details. | |
10 | * | |
11 | * You should have received a copy of the GNU General Public License | |
12 | * along with this program; if not, write to the Free Software | |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |
14 | * | |
15 | * Copyright SUSE Linux Products GmbH 2010 | |
16 | * | |
17 | * Authors: Alexander Graf <agraf@suse.de> | |
18 | */ | |
19 | ||
20 | /* Real mode helpers */ | |
21 | ||
#if defined(CONFIG_PPC_BOOK3S_64)

/*
 * On 64-bit Book3S the shadow-vcpu state is kept in the PACA, which r13
 * points to while we run in real mode, so "getting" it is a plain
 * register copy.
 */
#define GET_SHADOW_VCPU(reg)				\
	mr	reg, r13

#elif defined(CONFIG_PPC_BOOK3S_32)

/*
 * On 32-bit Book3S, load the shadow-vcpu pointer out of the thread
 * struct reachable from r2, converting each pointer with tophys()
 * because translation (MSR[IR]/[DR]) is off here.
 * NOTE(review): assumes r2 holds the current task pointer at this
 * point — confirm against the 32-bit entry code.
 */
#define GET_SHADOW_VCPU(reg)				\
	tophys(reg, r2);				\
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
	tophys(reg, reg)

#endif

/* Quick last-instruction fetch (lwz from the guest PC with MSR[DR] on).
 * Disable for nested KVM. */
#define USE_QUICK_LAST_INST
38 | ||
39 | ||
40 | /* Get helper functions for subarch specific functionality */ | |
41 | ||
42 | #if defined(CONFIG_PPC_BOOK3S_64) | |
43 | #include "book3s_64_slb.S" | |
44 | #elif defined(CONFIG_PPC_BOOK3S_32) | |
45 | #include "book3s_32_sr.S" | |
46 | #endif | |
47 | ||
48 | /****************************************************************************** | |
49 | * * | |
50 | * Entry code * | |
51 | * * | |
52 | *****************************************************************************/ | |
53 | ||
.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR = ~IR|DR		(real mode: translation off)
	 * R1 = host R1
	 * R2 = host R2
	 * R4 = guest shadow MSR
	 * R5 = normal host MSR
	 * R6 = current host MSR (EE, IR, DR off)
	 * LR = highmem guest exit code
	 * all other volatile GPRS = free
	 * SVCPU[CR] = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR] = guest LR
	 */

	/* r3 = shadow vcpu (the PACA on 64-bit, shadow_vcpu on 32-bit) */
	GET_SHADOW_VCPU(r3)

	/* Save guest exit handler address (current LR) and MSR so the
	 * exit path can rfi back into the highmem handler. */
	mflr	r0
	PPC_STL	r0, HSTATE_VMHANDLER(r3)
	PPC_STL	r5, HSTATE_HOST_MSR(r3)

	/* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
	PPC_STL	r1, HSTATE_HOST_R1(r3)
	PPC_STL	r2, HSTATE_HOST_R2(r3)

	/* Activate guest mode, so faults get handled by KVM */
	li	r11, KVM_GUEST_MODE_GUEST
	stb	r11, HSTATE_IN_GUEST(r3)

	/* Switch to guest segment. This is subarch specific. */
	LOAD_GUEST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64
	/* r13 == r3 here on 64-bit (GET_SHADOW_VCPU is "mr r3, r13"). */
BEGIN_FTR_SECTION
	/* Save host FSCR */
	mfspr	r8, SPRN_FSCR
	std	r8, HSTATE_HOST_FSCR(r13)
	/* Set FSCR during guest execution */
	ld	r9, SVCPU_SHADOW_FSCR(r13)
	mtspr	SPRN_FSCR, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */
	lbz	r0, HSTATE_RESTORE_HID5(r3)
	cmpwi	r0, 0
	beq	no_dcbz32_on

	mfspr	r0,SPRN_HID5
	ori	r0, r0, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5,r0
no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* Enter guest: restore guest CTR/LR/CR/XER from the shadow vcpu. */

	PPC_LL	r8, SVCPU_CTR(r3)
	PPC_LL	r9, SVCPU_LR(r3)
	lwz	r10, SVCPU_CR(r3)	/* CR is always 32-bit, hence lwz */
	PPC_LL	r11, SVCPU_XER(r3)

	mtctr	r8
	mtlr	r9
	mtcr	r10
	mtxer	r11

	/* Move SRR0 and SRR1 into the respective regs */
	PPC_LL	r9, SVCPU_PC(r3)	/* r9 = guest PC, consumed by mtsrr0 below */
	/* First clear RI in our current MSR value: from here until the
	 * rfi, SRR0/SRR1 hold guest state, so an interrupt would be
	 * unrecoverable. */
	li	r0, MSR_RI
	andc	r6, r6, r0

	/* Restore guest GPRs; r3 (the svcpu pointer), r4 (guest MSR),
	 * r6 (MSR image) and r9 (guest PC) are still live and restored
	 * last, after their values have been consumed. */
	PPC_LL	r0, SVCPU_R0(r3)
	PPC_LL	r1, SVCPU_R1(r3)
	PPC_LL	r2, SVCPU_R2(r3)
	PPC_LL	r5, SVCPU_R5(r3)
	PPC_LL	r7, SVCPU_R7(r3)
	PPC_LL	r8, SVCPU_R8(r3)
	PPC_LL	r10, SVCPU_R10(r3)
	PPC_LL	r11, SVCPU_R11(r3)
	PPC_LL	r12, SVCPU_R12(r3)
	PPC_LL	r13, SVCPU_R13(r3)

	MTMSR_EERI(r6)			/* switch to the RI-cleared MSR image */
	mtsrr0	r9			/* SRR0 = guest PC */
	mtsrr1	r4			/* SRR1 = guest shadow MSR */

	/* Last four: these registers held working values until now. */
	PPC_LL	r4, SVCPU_R4(r3)
	PPC_LL	r6, SVCPU_R6(r3)
	PPC_LL	r9, SVCPU_R9(r3)
	PPC_LL	r3, (SVCPU_R3)(r3)

	RFI				/* enter the guest at SRR0/SRR1 */
kvmppc_handler_trampoline_enter_end:
161 | ||
162 | ||
163 | ||
164 | /****************************************************************************** | |
165 | * * | |
166 | * Exit code * | |
167 | * * | |
168 | *****************************************************************************/ | |
169 | ||
.global kvmppc_interrupt_pr
kvmppc_interrupt_pr:
	/* 64-bit entry. Register usage at this point:
	 *
	 * SPRG_SCRATCH0 = guest R13
	 * R12 = (guest CR << 32) | exit handler id
	 * R13 = PACA
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CTR if RELOCATABLE
	 */
#ifdef CONFIG_PPC64
	/* Match 32-bit entry */
#ifdef CONFIG_RELOCATABLE
	/* Recover the guest CTR from SCRATCH1 *before* SCRATCH1 is
	 * overwritten with the guest CR below; r9 is preserved via
	 * SCRATCH2. */
	std	r9, HSTATE_SCRATCH2(r13)
	ld	r9, HSTATE_SCRATCH1(r13)
	mtctr	r9
	ld	r9, HSTATE_SCRATCH2(r13)
#endif
	rotldi	r12, r12, 32		   /* Flip R12 halves for stw */
	stw	r12, HSTATE_SCRATCH1(r13)	/* CR is now in the low half */
	srdi	r12, r12, 32		   /* shift trap into low half */
#endif

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0 = guest R13
	 * R12 = exit handler id
	 * R13 = shadow vcpu (32-bit) or PACA (64-bit)
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CR
	 */

	/* Save guest GPRs r0-r11 into the shadow vcpu (r12/r13 and CR are
	 * still parked in scratch space and saved further down). */

	PPC_STL	r0, SVCPU_R0(r13)
	PPC_STL	r1, SVCPU_R1(r13)
	PPC_STL	r2, SVCPU_R2(r13)
	PPC_STL	r3, SVCPU_R3(r13)
	PPC_STL	r4, SVCPU_R4(r13)
	PPC_STL	r5, SVCPU_R5(r13)
	PPC_STL	r6, SVCPU_R6(r13)
	PPC_STL	r7, SVCPU_R7(r13)
	PPC_STL	r8, SVCPU_R8(r13)
	PPC_STL	r9, SVCPU_R9(r13)
	PPC_STL	r10, SVCPU_R10(r13)
	PPC_STL	r11, SVCPU_R11(r13)

	/* Restore R1/R2 so we can handle faults */
	PPC_LL	r1, HSTATE_HOST_R1(r13)
	PPC_LL	r2, HSTATE_HOST_R2(r13)

	/* Save guest PC and MSR.  HV-routed interrupts (trap id with bit
	 * 0x2 set) deliver them in HSRR0/HSRR1 instead of SRR0/SRR1. */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	andi.	r0, r12, 0x2
	cmpwi	cr1, r0, 0		/* remember HV-ness in cr1; re-tested
					 * at the final dispatch below */
	beq	1f
	mfspr	r3,SPRN_HSRR0
	mfspr	r4,SPRN_HSRR1
	andi.	r12,r12,0x3ffd		/* clear the 0x2 bit: fold the HV
					 * trap id onto the non-HV one */
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	mfsrr0	r3
	mfsrr1	r4
2:
	PPC_STL	r3, SVCPU_PC(r13)
	PPC_STL	r4, SVCPU_SHADOW_SRR1(r13)

	/* Get scratch'ed off registers: guest R13, R12 and CR */
	GET_SCRATCH0(r9)
	PPC_LL	r8, HSTATE_SCRATCH0(r13)
	lwz	r7, HSTATE_SCRATCH1(r13)

	PPC_STL	r9, SVCPU_R13(r13)
	PPC_STL	r8, SVCPU_R12(r13)
	stw	r7, SVCPU_CR(r13)

	/* Save more register state */

	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8
	mflr	r9

	PPC_STL	r5, SVCPU_XER(r13)
	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
	stw	r7, SVCPU_FAULT_DSISR(r13)
	PPC_STL	r8, SVCPU_CTR(r13)
	PPC_STL	r9, SVCPU_LR(r13)

	/*
	 * In order for us to easily get the last instruction,
	 * we got the #vmexit at, we exploit the fact that the
	 * virtual layout is still the same here, so we can just
	 * ld from the guest's PC address
	 */

	/* We only load the last instruction when it's safe, i.e. for the
	 * exit reasons whose emulation needs it (r3 = guest PC here). */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_SYSCALL
	beq	ld_last_prev_inst	/* sc: the interesting insn is the one before PC */
	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
	beq-	ld_last_inst
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

	b	no_ld_last_inst

ld_last_prev_inst:
	addi	r3, r3, -4		/* back up one instruction */

ld_last_inst:
	/* Save off the guest instruction we're at */

	/* In case lwz faults */
	li	r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r9, KVM_GUEST_MODE_SKIP
	stb	r9, HSTATE_IN_GUEST(r13)

	/* 1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR			/* Enable paging for data */
	mtmsr	r11
	sync
	/* 2) fetch the instruction */
	lwz	r0, 0(r3)
	/* 3) disable paging again */
	mtmsr	r9
	sync

#endif
	/* r0 = fetched instruction, or KVM_INST_FETCH_FAILED if the lwz
	 * faulted (or USE_QUICK_LAST_INST is off). */
	stw	r0, SVCPU_LAST_INST(r13)

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Switch back to host MMU */
	LOAD_HOST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64

	/* Undo the 32-byte dcbz override if the entry path set it. */
	lbz	r5, HSTATE_RESTORE_HID5(r13)
	cmpwi	r5, 0
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5,SPRN_HID5
	rldimi	r5,r4,6,56		/* clear the dcbz32 bit in HID5 */
	mtspr	SPRN_HID5,r5

no_dcbz32_off:

BEGIN_FTR_SECTION
	/* Save guest FSCR on a FAC_UNAVAIL interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	bne+	no_fscr_save
	mfspr	r7, SPRN_FSCR
	std	r7, SVCPU_SHADOW_FSCR(r13)
no_fscr_save:
	/* Restore host FSCR */
	ld	r8, HSTATE_HOST_FSCR(r13)
	mtspr	SPRN_FSCR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Having set up SRR0/1 with the address where we want
	 * to continue with relocation on (potentially in module
	 * space), we either just go straight there with rfi[d],
	 * or we jump to an interrupt handler if there is an
	 * interrupt to be handled first. In the latter case,
	 * the rfi[d] at the end of the interrupt handler will
	 * get us back to where we want to continue.
	 */

	/* Register usage at this point:
	 *
	 * R1 = host R1
	 * R2 = host R2
	 * R10 = raw exit handler id
	 * R12 = exit handler id
	 * R13 = shadow vcpu (32-bit) or PACA (64-bit)
	 * SVCPU.* = guest *
	 *
	 */

	PPC_LL	r6, HSTATE_HOST_MSR(r13)
	PPC_LL	r8, HSTATE_VMHANDLER(r13)

#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	/* cr1 still holds "was this an HV interrupt" from above; for HV
	 * interrupts the return context goes into HSRR0/1 instead. */
	beq	cr1, 1f
	mtspr	SPRN_HSRR1, r6
	mtspr	SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	/* Restore host msr -> SRR1 */
	mtsrr1	r6
	/* Load highmem handler address */
	mtsrr0	r8

	/* RFI into the highmem handler, or jump to interrupt handler.
	 * beqa branches to the *absolute* low-memory vector address of
	 * the Linux first-level handler; its final rfi[d] then lands at
	 * the SRR0/SRR1 we just set up. */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beqa	BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beqa	BOOK3S_INTERRUPT_DECREMENTER
	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
	beqa	BOOK3S_INTERRUPT_PERFMON
	cmpwi	r12, BOOK3S_INTERRUPT_DOORBELL
	beqa	BOOK3S_INTERRUPT_DOORBELL

	RFI
kvmppc_handler_trampoline_exit_end: