/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

#define SHADOW_VCPU_OFF		PACA_KVM_SVCPU

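/*
 * (The two "skip" handlers below bump the saved (H)SRR0 past the
 * instruction that trapped and return straight to it, so that a
 * faulting instruction can simply be stepped over.)
 */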
	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
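/*
 * (Mechanically: RI is cleared first so an interrupt cannot clobber
 * SRR0/SRR1 while they hold our target, and the SRR1 value has IR/DR
 * cleared so the RFI below lands in kvmppc_hv_entry with the MMU off.)
 */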
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

#define ULONG_SIZE		8
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)
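	/* (The exit path picks HSTATE_VMHANDLER back up and rfids to it,
	 * so LR saved here is effectively our host return address.) */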

	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

	/*
	 * Set the decrementer to the guest decrementer.
	 */
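	/*
	 * (VCPU_DEC_EXPIRES is an absolute timebase value; what gets
	 * programmed into DEC below is the ticks remaining until then.)
	 */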
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Set partition DABR */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/* Switch to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	ld	r8,VCPU_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
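	/* (The detour through LPID_RSVD, bracketed by ptesync/isync,
	 * appears to follow the architected sequence for changing SDR1
	 * and LPIDR together, so no translation is ever formed from a
	 * mismatched page-table/LPID pair.) */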

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/*
	 * Invalidate the TLB if we could possibly have stale TLB
	 * entries for this partition on this core due to the use
	 * of tlbiel.
	 */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r5,VCPU_VCPUID(r4)
	lhz	r6,PACAPACAINDEX(r13)
	lhz	r8,VCPU_LAST_CPU(r4)
	sldi	r7,r6,1			/* see if this is the same vcpu */
	add	r7,r7,r9		/* as last ran on this pcpu */
	lhz	r0,KVM_LAST_VCPU(r7)
	cmpw	r6,r8			/* on the same cpu core as last time? */
	bne	3f
	cmpw	r0,r5			/* same vcpu as this core last ran? */
	beq	1f
3:	sth	r6,VCPU_LAST_CPU(r4)	/* if not, invalidate partition TLB */
	sth	r5,KVM_LAST_VCPU(r7)
	li	r6,128
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:
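	/* (The loop above steps the tlbiel operand, with IS = 0b10,
	 * through 128 values 0x1000 apart, i.e. one per TLB congruence
	 * class, flushing this core's entries for the old partition.) */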

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

	/* Move SRR0 and SRR1 into the respective regs */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	ld	r10, VCPU_PC(r4)

	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
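	/* (The rldicl/rotldi pair rotates the HV bit to the top, masks
	 * it off and rotates back, i.e. it clears just MSR_HV; ME is
	 * then forced on in the MSR the guest will run with.) */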

fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r9, VCPU_GPR(r9)(r4)
	ld	r10, VCPU_GPR(r10)(r4)
	ld	r11, VCPU_GPR(r11)(r4)
	ld	r12, VCPU_GPR(r12)(r4)
	ld	r13, VCPU_GPR(r13)(r4)

	ld	r4, VCPU_GPR(r4)(r4)

	hrfid
	b	.
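	/* (The hrfid above enters the guest; the "b ." should never
	 * execute and only guards against fall-through.) */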

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(r0)(r9)
	std	r1, VCPU_GPR(r1)(r9)
	std	r2, VCPU_GPR(r2)(r9)
	std	r3, VCPU_GPR(r3)(r9)
	std	r4, VCPU_GPR(r4)(r9)
	std	r5, VCPU_GPR(r5)(r9)
	std	r6, VCPU_GPR(r6)(r9)
	std	r7, VCPU_GPR(r7)(r9)
	std	r8, VCPU_GPR(r8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(r9)(r9)
	std	r10, VCPU_GPR(r10)(r9)
	std	r11, VCPU_GPR(r11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(r12)(r9)
	stw	r4, VCPU_CR(r9)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(r13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
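	/* (If HDEC is still non-negative it hasn't really expired, so the
	 * interrupt must be left over from before HDEC was last reloaded;
	 * ignore_hdec just re-enters the guest.) */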
	/* See if this is something we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode
hcall_real_cont:

	/* Check for mediated interrupts (could be done earlier really ...) */
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	ld	r5,VCPU_LPCR(r9)
	andi.	r0,r11,MSR_EE
	beq	1f
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,-1
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_LAST_INST(r9)
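	/* (last_inst is left as -1, i.e. "not available", unless HEIR
	 * supplied the instruction image.) */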

	/* Save more register state */
	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8

	stw	r5, VCPU_XER(r9)
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	std	r8, VCPU_CTR(r9)
	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
7:	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:
	/* Switch back to host partition */
	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
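	/* (Only the bolted host SLB entries are reloaded here; the rest
	 * of the host SLB is presumably re-established via SLB faults
	 * as needed.) */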

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)	/* save guest DSCR in the vcpu */
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r9)
	std	r15, VCPU_GPR(r15)(r9)
	std	r16, VCPU_GPR(r16)(r9)
	std	r17, VCPU_GPR(r17)(r9)
	std	r18, VCPU_GPR(r18)(r9)
	std	r19, VCPU_GPR(r19)(r9)
	std	r20, VCPU_GPR(r20)(r9)
	std	r21, VCPU_GPR(r21)(r9)
	std	r22, VCPU_GPR(r22)(r9)
	std	r23, VCPU_GPR(r23)(r9)
	std	r24, VCPU_GPR(r24)(r9)
	std	r25, VCPU_GPR(r25)(r9)
	std	r26, VCPU_GPR(r26)(r9)
	std	r27, VCPU_GPR(r27)(r9)
	std	r28, VCPU_GPR(r28)(r9)
	std	r29, VCPU_GPR(r29)(r9)
	std	r30, VCPU_GPR(r30)(r9)
	std	r31, VCPU_GPR(r31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r6, SPRN_MMCRA
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
22:
	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to the interrupt vector address
	 * which we have in r12.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	mtctr	r12
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqctr
	RFI

11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500
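	/* (0x500 is the external interrupt vector; we branch there in
	 * real mode with HSRR0/1 holding the saved handler address and
	 * host MSR, so the hrfid at the end of the Linux handler lands
	 * back in the host.) */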

6:	mfspr	r6,SPRN_HDAR
	mfspr	r7,SPRN_HDSISR
	b	7b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(r3)(r9)
	andi.	r0,r11,MSR_PR
	bne	hcall_real_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	hcall_real_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	hcall_real_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(r4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(r3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return
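	/* (hcall numbers are multiples of 4, so the number itself serves
	 * as the byte offset into hcall_real_table; each .long entry is
	 * either 0, meaning no real-mode handler, or the handler's
	 * offset from the table base.) */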

	/* We've attempted a real mode hcall, but the handler has punted
	 * it back to userspace.  We need to restore some clobbered
	 * volatiles before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)

	b	hcall_real_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
	.long	0		/* 0x64 */
	.long	0		/* 0x68 */
	.long	0		/* 0x6c */
	.long	0		/* 0x70 */
	.long	0		/* 0x74 */
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	0		/* 0xe0 */
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME)
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	mtspr	SPRN_DABR,r4
	li	r3,0
	blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	stxvd2x	reg,r6,r3
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r9
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	lxvd2x	reg,r7,r4
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr