Merge tag 'kvm_mips_4.9_1' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan...
author    Radim Krčmář <rkrcmar@redhat.com>
          Thu, 29 Sep 2016 14:26:52 +0000 (16:26 +0200)
committer Radim Krčmář <rkrcmar@redhat.com>
          Thu, 29 Sep 2016 14:26:52 +0000 (16:26 +0200)
MIPS KVM updates for v4.9

- A couple of fixes in preparation for supporting MIPS EVA host kernels.
- MIPS SMP host & TLB invalidation fixes.

arch/mips/include/asm/kvm_host.h
arch/mips/kvm/emulate.c
arch/mips/kvm/mips.c
arch/mips/kvm/mmu.c
arch/mips/kvm/trap_emul.c

arch/mips/include/asm/kvm_host.h
index 5f488dc8a7d593755e5ef6e30ef7963db4b3fc4c..07f58cfc1ab98b2724d01630130527c0546b1126 100644
 #define KVM_INVALID_INST               0xdeadbeef
 #define KVM_INVALID_ADDR               0xdeadbeef
 
+/*
+ * EVA has overlapping user & kernel address spaces, so user VAs may be >
+ * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
+ * PAGE_OFFSET.
+ */
+
+#define KVM_HVA_ERR_BAD                        (-1UL)
+#define KVM_HVA_ERR_RO_BAD             (-2UL)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+       return IS_ERR_VALUE(addr);
+}
+
 extern atomic_t kvm_mips_instance;
 
 struct kvm_vm_stat {
@@ -314,6 +328,9 @@ struct kvm_vcpu_arch {
        u32 guest_kernel_asid[NR_CPUS];
        struct mm_struct guest_kernel_mm, guest_user_mm;
 
+       /* Guest ASID of last user mode execution */
+       unsigned int last_user_gasid;
+
        int last_sched_cpu;
 
        /* WAIT executed */
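
The hunk above replaces the generic KVM_HVA_ERR_BAD (which is based on PAGE_OFFSET) because, with EVA, user virtual addresses can legitimately exceed PAGE_OFFSET, so an address-threshold test would misclassify valid HVAs. Reserving the very top of the address space instead, as IS_ERR_VALUE() does, keeps the whole normal range usable. A minimal user-space sketch of the idea (the addresses and helper name are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    #define HVA_ERR_BAD    (-1UL)  /* no usable mapping */
    #define HVA_ERR_RO_BAD (-2UL)  /* mapping exists but is read-only */

    /* Same spirit as the kernel's IS_ERR_VALUE(): top 4095 values are errors. */
    static bool is_error_hva(unsigned long addr)
    {
            return addr >= (unsigned long)-4095;
    }

    int main(void)
    {
            /*
             * 0xc0000000 would look "bad" under a PAGE_OFFSET threshold test,
             * yet can be a perfectly valid user VA under EVA.
             */
            unsigned long hvas[] = { 0x00400000UL, 0xc0000000UL,
                                     HVA_ERR_BAD, HVA_ERR_RO_BAD };
            unsigned int i;

            for (i = 0; i < sizeof(hvas) / sizeof(hvas[0]); i++)
                    printf("%#lx -> %s\n", hvas[i],
                           is_error_hva(hvas[i]) ? "error" : "valid");
            return 0;
    }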
arch/mips/kvm/emulate.c
index e788515f766b46cefb2a36dfc95ab6bfcec6e8e3..4db4c03708590f3030bdf84d87264ad2a79f6bc4 100644
@@ -846,6 +846,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
        return EMULATE_FAIL;
 }
 
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu:      VCPU with changed mappings.
+ * @tlb:       TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+                                         struct kvm_mips_tlb *tlb)
+{
+       int cpu, i;
+       bool user;
+
+       /* No need to flush for entries which are already invalid */
+       if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
+               return;
+       /* User address space doesn't need flushing for KSeg2/3 changes */
+       user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+       preempt_disable();
+
+       /*
+        * Probe the shadow host TLB for the entry being overwritten, if one
+        * matches, invalidate it
+        */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+       /* Invalidate the whole ASID on other CPUs */
+       cpu = smp_processor_id();
+       for_each_possible_cpu(i) {
+               if (i == cpu)
+                       continue;
+               if (user)
+                       vcpu->arch.guest_user_asid[i] = 0;
+               vcpu->arch.guest_kernel_asid[i] = 0;
+       }
+
+       preempt_enable();
+}
+
 /* Write Guest TLB Entry @ Index */
 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 {
@@ -865,11 +906,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
        }
 
        tlb = &vcpu->arch.guest_tlb[index];
-       /*
-        * Probe the shadow host TLB for the entry being overwritten, if one
-        * matches, invalidate it
-        */
-       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+       kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -898,11 +936,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 
        tlb = &vcpu->arch.guest_tlb[index];
 
-       /*
-        * Probe the shadow host TLB for the entry being overwritten, if one
-        * matches, invalidate it
-        */
-       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+       kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -1026,6 +1060,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;
+       int cpu, i;
 
        /*
         * Update PC and hold onto current PC in case there is
@@ -1127,16 +1162,31 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
                                u32 nasid =
                                        vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
-                               if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
-                                   ((kvm_read_c0_guest_entryhi(cop0) &
+                               if (((kvm_read_c0_guest_entryhi(cop0) &
                                      KVM_ENTRYHI_ASID) != nasid)) {
                                        trace_kvm_asid_change(vcpu,
                                                kvm_read_c0_guest_entryhi(cop0)
                                                        & KVM_ENTRYHI_ASID,
                                                nasid);
 
-                                       /* Blow away the shadow host TLBs */
-                                       kvm_mips_flush_host_tlb(1);
+                                       /*
+                                        * Regenerate/invalidate kernel MMU
+                                        * context.
+                                        * The user MMU context will be
+                                        * regenerated lazily on re-entry to
+                                        * guest user if the guest ASID actually
+                                        * changes.
+                                        */
+                                       preempt_disable();
+                                       cpu = smp_processor_id();
+                                       kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
+                                                               cpu, vcpu);
+                                       vcpu->arch.guest_kernel_asid[cpu] =
+                                               vcpu->arch.guest_kernel_mm.context.asid[cpu];
+                                       for_each_possible_cpu(i)
+                                               if (i != cpu)
+                                                       vcpu->arch.guest_kernel_asid[i] = 0;
+                                       preempt_enable();
                                }
                                kvm_write_c0_guest_entryhi(cop0,
                                                           vcpu->arch.gprs[rt]);
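
Both changes in this file follow the same SMP pattern: the running CPU does the precise work (a targeted shadow-TLB probe and invalidate, or an immediate kernel-ASID regeneration), while every other CPU merely has its cached ASID zeroed so that it regenerates, and thereby flushes, on the vCPU's next arrival there. A condensed user-space model of that pattern (the struct and function names are invented for illustration):

    #include <stdio.h>

    #define NR_CPUS 4

    struct vcpu_model {
            unsigned int user_asid[NR_CPUS];
            unsigned int kernel_asid[NR_CPUS];
    };

    /*
     * Mirrors kvm_mips_invalidate_guest_tlb(): "user" is false for KSeg2/3
     * mappings, which exist only in the guest-kernel address space.
     */
    static void invalidate_guest_tlb(struct vcpu_model *v, int this_cpu,
                                     int user)
    {
            int i;

            /* on this_cpu, a targeted shadow host TLB invalidation happens */
            for (i = 0; i < NR_CPUS; i++) {
                    if (i == this_cpu)
                            continue;
                    if (user)
                            v->user_asid[i] = 0;  /* 0 = "stale, regenerate" */
                    v->kernel_asid[i] = 0;
            }
    }

    int main(void)
    {
            struct vcpu_model v = { { 11, 12, 13, 14 }, { 21, 22, 23, 24 } };
            int i;

            invalidate_guest_tlb(&v, 0, 1);
            for (i = 0; i < NR_CPUS; i++)
                    printf("cpu%d: user %u, kernel %u\n",
                           i, v.user_asid[i], v.kernel_asid[i]);
            return 0;
    }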
arch/mips/kvm/mips.c
index 49b25e74d0c7fef69982656aa76459a319781dbb..ce961495b5e123f374a4d129387daffa20974373 100644
@@ -421,6 +421,31 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
        return -ENOIOCTLCMD;
 }
 
+/* Must be called with preemption disabled, just before entering guest */
+static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int cpu = smp_processor_id();
+       unsigned int gasid;
+
+       /*
+        * Lazy host ASID regeneration for guest user mode.
+        * If the guest ASID has changed since the last guest usermode
+        * execution, regenerate the host ASID so as to invalidate stale TLB
+        * entries.
+        */
+       if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
+               gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
+               if (gasid != vcpu->arch.last_user_gasid) {
+                       kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu,
+                                               vcpu);
+                       vcpu->arch.guest_user_asid[cpu] =
+                               vcpu->arch.guest_user_mm.context.asid[cpu];
+                       vcpu->arch.last_user_gasid = gasid;
+               }
+       }
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int r = 0;
@@ -448,6 +473,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        htw_stop();
 
        trace_kvm_enter(vcpu);
+
+       kvm_mips_check_asids(vcpu);
+
        r = vcpu->arch.vcpu_run(run, vcpu);
        trace_kvm_out(vcpu);
 
@@ -1561,6 +1589,8 @@ skip_emul:
        if (ret == RESUME_GUEST) {
                trace_kvm_reenter(vcpu);
 
+               kvm_mips_check_asids(vcpu);
+
                /*
                 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
                 * is live), restore FCR31 / MSACSR.
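
kvm_mips_check_asids() is what makes the invalidation above cheap: rather than flushing host TLBs whenever the guest writes EntryHi, the host user ASID is regenerated lazily, just before guest entry, and only if the guest ASID differs from the one recorded at the last user-mode execution. A small model of that check (the types and the trivial allocator are simplified stand-ins, not the kernel's MMU-context machinery):

    #include <stdbool.h>
    #include <stdio.h>

    struct vcpu_model {
            unsigned int guest_entryhi_asid; /* current guest EntryHi.ASID */
            unsigned int last_user_gasid;    /* snapshot from last user entry */
            unsigned int host_user_asid;     /* host ASID backing guest user */
    };

    static unsigned int next_host_asid = 1;

    static void check_asids(struct vcpu_model *v, bool guest_kernel_mode)
    {
            if (guest_kernel_mode)
                    return; /* only guest user mappings are keyed by gasid */

            if (v->guest_entryhi_asid != v->last_user_gasid) {
                    v->host_user_asid = next_host_asid++; /* "regenerate" */
                    v->last_user_gasid = v->guest_entryhi_asid;
            }
    }

    int main(void)
    {
            struct vcpu_model v = { .guest_entryhi_asid = 7 };

            check_asids(&v, false);   /* first user entry: allocates */
            check_asids(&v, false);   /* ASID unchanged: nothing to do */
            v.guest_entryhi_asid = 9; /* guest switched processes */
            check_asids(&v, false);   /* stale: reallocates */
            printf("host ASID %u for guest ASID %u\n",
                   v.host_user_asid, v.last_user_gasid);
            return 0;
    }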
arch/mips/kvm/mmu.c
index 121008c0fcc92e81847176b0fa016947f8ef8cd0..03883ba806e252d451f5df348c5414b3b31971cb 100644
@@ -250,15 +250,27 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                    vcpu->arch.guest_kernel_mm.context.asid[cpu];
+               newasid++;
+
+               kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+                         cpu_context(cpu, current->mm));
+               kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
+       }
+
+       if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
+                                               asid_version_mask(cpu)) {
+               u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
+                               KVM_ENTRYHI_ASID;
+
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                    vcpu->arch.guest_user_mm.context.asid[cpu];
+               vcpu->arch.last_user_gasid = gasid;
                newasid++;
 
                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
-               kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
                kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                          vcpu->arch.guest_user_asid[cpu]);
        }
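
Splitting the block lets the kernel and user ASIDs be regenerated independently, which the lazy user-ASID path above relies on. Both halves use the classic MIPS staleness test: an ASID value carries version bits alongside the ID, and a cached ASID is stale once its version no longer matches the CPU's asid_cache. An illustrative model of just that test (the mask is invented for the sketch, not MIPS's real layout):

    #include <stdbool.h>
    #include <stdio.h>

    #define ASID_VERSION_MASK 0xffffff00UL /* upper bits: generation number */

    static bool asid_is_stale(unsigned long cached, unsigned long asid_cache)
    {
            /* same shape as "(cached ^ asid_cache) & asid_version_mask(cpu)" */
            return (cached ^ asid_cache) & ASID_VERSION_MASK;
    }

    int main(void)
    {
            unsigned long asid_cache = 0x00000205UL; /* generation 2 */

            printf("%d\n", asid_is_stale(0x00000203UL, asid_cache)); /* 0 */
            printf("%d\n", asid_is_stale(0x00000104UL, asid_cache)); /* 1 */
            return 0;
    }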
arch/mips/kvm/trap_emul.c
index 091553942bcbca51941fa768b67bdcc3f5dfecdc..3a5484f9aa5078a3c5a67b35c2e56b8cbd09c35b 100644
@@ -175,6 +175,24 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
+       } else if (KVM_GUEST_KERNEL_MODE(vcpu)
+                  && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+               /*
+                * With EVA we may get a TLB exception instead of an address
+                * error when the guest performs MMIO to KSeg1 addresses.
+                */
+               kvm_debug("Emulate %s MMIO space\n",
+                         store ? "Store to" : "Load from");
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       kvm_err("Emulate %s MMIO space failed\n",
+                               store ? "Store to" : "Load from");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
        } else {
                kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
                        store ? "ST" : "LD", cause, opc, badvaddr);