/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/
#define SHADOW_VCPU_OFF		PACA_KVM_SVCPU
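/*
 * The two stubs below are branched to from the first-level interrupt
 * handlers (the KVM_HANDLER_SKIP path, presumably): they advance
 * SRR0/HSRR0 past the faulting instruction, recover r13 and return,
 * i.e. the interrupt is simply skipped.
 */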
	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI
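	/*
	 * The RFI above is not a return: SRR0/SRR1 were just loaded, so
	 * it lands in kvmppc_hv_entry with IR/DR off, i.e. in real mode.
	 */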
#define ULONG_SIZE		8
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)
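	/* Our caller's LR, saved here, is what the exit path below RFIs
	 * back to (the "highmem handler" in book3s_hv_interrupts.S). */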
	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)
	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync
	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)
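	/* vcpu->arch.dec_expires is an absolute timebase value, so the
	 * DEC programmed above is (expiry - current timebase). */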
	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8
	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
25:
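	/* The VPA yield count is bumped on every entry and exit (see the
	 * matching increment in the exit path), which lets the guest
	 * detect that it has been preempted. */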
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Set partition DABR */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync
	/* Switch to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	ld	r8,VCPU_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon
	/*
	 * Invalidate the TLB if we could possibly have stale TLB
	 * entries for this partition on this core due to the use
	 * of tlbiel.
	 */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r5,VCPU_VCPUID(r4)
	lhz	r6,PACAPACAINDEX(r13)
	lhz	r8,VCPU_LAST_CPU(r4)
	sldi	r7,r6,1			/* see if this is the same vcpu */
	add	r7,r7,r9		/* as last ran on this pcpu */
	lhz	r0,KVM_LAST_VCPU(r7)
	cmpw	r6,r8			/* on the same cpu core as last time? */
	bne	3f
	cmpw	r0,r5			/* same vcpu as this core last ran? */
	beq	1f
3:	sth	r6,VCPU_LAST_CPU(r4)	/* if not, invalidate partition TLB */
	sth	r5,KVM_LAST_VCPU(r7)
	li	r6,128			/* TLB entries */
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:
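	/* The 128 iterations stepping r7 by 0x1000 walk every TLB
	 * congruence class; with IS = 0b10, each tlbiel should invalidate
	 * the entries belonging to this partition in that class. */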
	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7
	/* Move SRR0 and SRR1 into the respective regs */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	ld	r10, VCPU_PC(r4)

	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
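	/* The rldicl/rotldi pair rotates MSR_HV down to bit 0, masks it
	 * off, and rotates back: only MSR_HV is cleared, so the guest can
	 * never run in hypervisor state. */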
fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6
	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r9, VCPU_GPR(r9)(r4)
	ld	r10, VCPU_GPR(r10)(r4)
	ld	r11, VCPU_GPR(r11)(r4)
	ld	r12, VCPU_GPR(r12)(r4)
	ld	r13, VCPU_GPR(r13)(r4)

	ld	r4, VCPU_GPR(r4)(r4)

	hrfid
	b	.
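	/* r4 (the vcpu pointer) has to be loaded last: it is the base
	 * register for all the other guest GPR loads above. */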
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/
/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */
	std	r0, VCPU_GPR(r0)(r9)
	std	r1, VCPU_GPR(r1)(r9)
	std	r2, VCPU_GPR(r2)(r9)
	std	r3, VCPU_GPR(r3)(r9)
	std	r4, VCPU_GPR(r4)(r9)
	std	r5, VCPU_GPR(r5)(r9)
	std	r6, VCPU_GPR(r6)(r9)
	std	r7, VCPU_GPR(r7)(r9)
	std	r8, VCPU_GPR(r8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(r9)(r9)
	std	r10, VCPU_GPR(r10)(r9)
	std	r11, VCPU_GPR(r11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(r12)(r9)
	stw	r4, VCPU_CR(r9)
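	/* Guest r9 was stashed in HSTATE_HOST_R2 on entry here, and guest
	 * r12 and CR are still in HSTATE_SCRATCH0/1, hence the shuffle
	 * through r0, r3 and r4 above. */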
	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(r13)(r9)
	std	r4, VCPU_LR(r9)
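	/* Vector numbers with bit 1 set are hypervisor interrupts, whose
	 * PC/MSR are in HSRR0/1 instead; the clrrdi turns r12 back into
	 * a plain vector number. */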
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)
	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:

	/* See if this is something we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode
hcall_real_cont:

	/* Check for mediated interrupts (could be done earlier really ...) */
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	andi.	r0,r11,MSR_EE
	beq	1f
	mfspr	r5,SPRN_LPCR
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:
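	/* LPCR_MER means an external interrupt is being mediated to the
	 * guest; if the guest also has MSR_EE set, bounce it straight
	 * back in as a 0x500 without going up to the host. */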
	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)
	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,-1		/* KVM_INST_FETCH_FAILED */
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_LAST_INST(r9)
	/* Save more register state  */
	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8

	stw	r5, VCPU_XER(r9)
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	std	r8, VCPU_CTR(r9)
	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
7:	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)
	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)
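	/* Only valid entries are saved, so VCPU_SLB_MAX may be smaller
	 * than VCPU_SLB_NR; the SLB index is folded into the saved ESID
	 * doubleword, ready for slbmte on re-entry. */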
	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:
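	/* The entry path branches here with r9 = vcpu and r12 already set
	 * to BOOK3S_INTERRUPT_HV_DECREMENTER when HDEC is about to
	 * expire, so it shares the partition switch-back below. */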
	/* Switch back to host partition */
	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
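	/* From here HDEC can no longer interrupt us: it was just pushed
	 * out ~2^31 ticks, and the host LPCR value presumably has HDICE
	 * clear (see the DEC reload comment further down). */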
	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
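	/* Only the bolted host SLB entries are replaced here; the rest
	 * can fault back in once the host MMU is back on. */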
	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r9)
	std	r15, VCPU_GPR(r15)(r9)
	std	r16, VCPU_GPR(r16)(r9)
	std	r17, VCPU_GPR(r17)(r9)
	std	r18, VCPU_GPR(r18)(r9)
	std	r19, VCPU_GPR(r19)(r9)
	std	r20, VCPU_GPR(r20)(r9)
	std	r21, VCPU_GPR(r21)(r9)
	std	r22, VCPU_GPR(r22)(r9)
	std	r23, VCPU_GPR(r23)(r9)
	std	r24, VCPU_GPR(r24)(r9)
	std	r25, VCPU_GPR(r25)(r9)
	std	r26, VCPU_GPR(r26)(r9)
	std	r27, VCPU_GPR(r27)(r9)
	std	r28, VCPU_GPR(r28)(r9)
	std	r29, VCPU_GPR(r29)(r9)
	std	r30, VCPU_GPR(r30)(r9)
	std	r31, VCPU_GPR(r31)(r9)
	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r6, SPRN_MMCRA
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
22:
	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp
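	/* kvmppc_save_fp (at the bottom of this file) saves the guest
	 * FP/VMX/VSX state with r3 = vcpu pointer. */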
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4
	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
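	/* Note that the PMC reload above clobbered r9 (the vcpu pointer);
	 * from here on only r12 (the trap) and the PACA state are used. */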
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to the interrupt vector address
	 * which we have in r12.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	mtctr	r12
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqctr
	RFI

11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500
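	/* Externals branch to the 0x500 handler with HSRR0/1 pointing at
	 * the highmem handler; machine checks take beqctr to the vector
	 * in CTR with SRR0/1 set likewise; everything else RFIs straight
	 * to the highmem handler. */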
6:	mfspr	r6,SPRN_HDAR
	mfspr	r7,SPRN_HDSISR
	b	7b
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(r3)(r9)
	andi.	r0,r11,MSR_PR
	bne	hcall_real_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	hcall_real_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	hcall_real_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(r4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(r3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return
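	/* hcall numbers are multiples of 4, so after clrrdi the number in
	 * guest r3 indexes the .long offset table directly; a zero entry
	 * means "no real-mode handler". */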
	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)

	b	hcall_real_cont
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0x0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	0		/* 0x20 - H_PUT_TCE */
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.rept	62		/* 0x2c - 0x120: no real-mode handlers */
	.long	0
	.endr
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:
ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME)
	b	fast_guest_return
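	/* bounce_ext_interrupt re-enters the guest at its 0x500 vector:
	 * SRR0/1 hold the interrupted guest PC/MSR, while r10/r11 become
	 * the new PC and MSR that fast_guest_return installs. */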
_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	mtspr	SPRN_DABR,r4
	li	r3,0
	blr
/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	stxvd2x	reg,r6,r3
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r9
	isync
	blr
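	/* MSR is put back from r9 above, so FP/VEC/VSX are only enabled
	 * for the duration of the save. */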
/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	lxvd2x	reg,r7,r4
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr
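	/* Unlike kvmppc_save_fp, this leaves MSR with FP/VEC/VSX enabled,
	 * presumably harmless since the caller is about to hrfid into the
	 * guest, which installs the guest MSR anyway. */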