KVM: PPC: Use preregistered memory API to access TCE list
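
The lookup helper kvmppc_find_table() now takes a struct kvm * rather than a
vcpu, so callers that do not have a vcpu at hand can still resolve a LIOBN to
its TCE table. A minimal caller sketch, not part of the patch (example_lookup()
is a hypothetical name; only kvmppc_find_table() and the H_* return codes come
from the code below):

	/* Hypothetical caller: any holder of a struct kvm * can do the lookup. */
	static long example_lookup(struct kvm *kvm, unsigned long liobn)
	{
		struct kvmppc_spapr_tce_table *stt;

		stt = kvmppc_find_table(kvm, liobn);	/* was: kvmppc_find_table(vcpu, liobn) */
		if (!stt)
			return H_TOO_HARD;	/* defer to the virtual-mode handler */

		return H_SUCCESS;
	}
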
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index e4c4ea973e57892ccae032eba7218d107fee6cd8..0f145fc7a3a567181223863666a9c1e6454959ba 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
  * WARNING: This will be called in real or virtual mode on HV KVM and virtual
  *          mode on PR KVM
  */
-struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
+struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
                unsigned long liobn)
 {
-       struct kvm *kvm = vcpu->kvm;
        struct kvmppc_spapr_tce_table *stt;
 
        list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
@@ -182,12 +181,13 @@ EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
 long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce)
 {
-       struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+       struct kvmppc_spapr_tce_table *stt;
        long ret;
 
        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */
 
+       stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;
 
@@ -239,8 +239,9 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
        long i, ret = H_SUCCESS;
        unsigned long tces, entry, ua = 0;
        unsigned long *rmap = NULL;
+       bool prereg = false;
 
-       stt = kvmppc_find_table(vcpu, liobn);
+       stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;
 
@@ -259,23 +260,47 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
        if (ret != H_SUCCESS)
                return ret;
 
-       if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
-               return H_TOO_HARD;
+       if (mm_iommu_preregistered(vcpu->kvm->mm)) {
+               /*
+                * We get here if guest memory was pre-registered, which is
+                * normally the VFIO case, and the gpa->hpa translation does
+                * not depend on the HPT.
+                */
+               struct mm_iommu_table_group_mem_t *mem;
 
-       rmap = (void *) vmalloc_to_phys(rmap);
+               if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+                       return H_TOO_HARD;
 
-       /*
-        * Synchronize with the MMU notifier callbacks in
-        * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
-        * While we have the rmap lock, code running on other CPUs
-        * cannot finish unmapping the host real page that backs
-        * this guest real page, so we are OK to access the host
-        * real page.
-        */
-       lock_rmap(rmap);
-       if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
-               ret = H_TOO_HARD;
-               goto unlock_exit;
+               mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
+               if (mem)
+                       prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+       }
+
+       if (!prereg) {
+               /*
+                * This is usually the case of a guest with emulated devices
+                * only, when the TCE list is not in preregistered memory.
+                * We do not require memory to be preregistered in this case,
+                * so lock the rmap and do __find_linux_pte_or_hugepte().
+                */
+               if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+                       return H_TOO_HARD;
+
+               rmap = (void *) vmalloc_to_phys(rmap);
+
+               /*
+                * Synchronize with the MMU notifier callbacks in
+                * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
+                * While we have the rmap lock, code running on other CPUs
+                * cannot finish unmapping the host real page that backs
+                * this guest real page, so we are OK to access the host
+                * real page.
+                */
+               lock_rmap(rmap);
+               if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
+                       ret = H_TOO_HARD;
+                       goto unlock_exit;
+               }
        }
 
        for (i = 0; i < npages; ++i) {
@@ -289,7 +314,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
        }
 
 unlock_exit:
-       unlock_rmap(rmap);
+       if (rmap)
+               unlock_rmap(rmap);
 
        return ret;
 }
@@ -301,7 +327,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
 
-       stt = kvmppc_find_table(vcpu, liobn);
+       stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;
 
@@ -322,12 +348,13 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba)
 {
-       struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+       struct kvmppc_spapr_tce_table *stt;
        long ret;
        unsigned long idx;
        struct page *page;
        u64 *tbl;
 
+       stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;
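
For reference, the gist of the new fast path in kvmppc_rm_h_put_tce_indirect()
is the three-step preregistered-memory lookup shown in the hunk above:
mm_iommu_preregistered() to check whether the mm has any preregistered regions,
mm_iommu_lookup_rm() to find the region covering the TCE list, and
mm_iommu_ua_to_hpa_rm() to translate the userspace address to a host physical
address without touching the HPT. A minimal sketch using only those calls
(rm_resolve_tce_list() is a hypothetical helper, not part of the patch):

	/*
	 * Hypothetical real-mode helper: resolve the userspace address of a
	 * TCE list to a host physical address via the preregistered-memory
	 * API. Returns 0 on success, -1 when the caller should fall back to
	 * the rmap-locked HPT walk.
	 */
	static int rm_resolve_tce_list(struct kvm *kvm, unsigned long ua,
				       unsigned long *tces)
	{
		struct mm_iommu_table_group_mem_t *mem;

		if (!mm_iommu_preregistered(kvm->mm))
			return -1;

		mem = mm_iommu_lookup_rm(kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (!mem)
			return -1;

		return mm_iommu_ua_to_hpa_rm(mem, ua, tces) ? -1 : 0;
	}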