Merge tag 'gvt-next-2017-02-15' of https://github.com/01org/gvt-linux into drm-intel...
author     Jani Nikula <jani.nikula@intel.com>
           Thu, 16 Feb 2017 09:58:16 +0000 (11:58 +0200)
committer  Jani Nikula <jani.nikula@intel.com>
           Thu, 16 Feb 2017 09:58:37 +0000 (11:58 +0200)
gvt-next-2017-02-15

- Chuanxiao's IOMMU workaround fix
- debug message cleanup from Changbin
- oops fix in the failure path of workload submission during GPU reset, from Changbin
- other misc fixes

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
13 files changed:
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/display.h
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/interrupt.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/render.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/vgpu.c

diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7311aeab16f7ae4b1256cdc60e1e5d4f95acc474..3b6caaca975135d6b8d595393ffc372bc14107bc 100644
@@ -49,20 +49,21 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
        if (high_gm) {
                node = &vgpu->gm.high_gm_node;
                size = vgpu_hidden_sz(vgpu);
-               start = gvt_hidden_gmadr_base(gvt);
-               end = gvt_hidden_gmadr_end(gvt);
+               start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+               end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
                flags = PIN_HIGH;
        } else {
                node = &vgpu->gm.low_gm_node;
                size = vgpu_aperture_sz(vgpu);
-               start = gvt_aperture_gmadr_base(gvt);
-               end = gvt_aperture_gmadr_end(gvt);
+               start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+               end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
                flags = PIN_MAPPABLE;
        }
 
        mutex_lock(&dev_priv->drm.struct_mutex);
        ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
-                                 size, 4096, I915_COLOR_UNEVICTABLE,
+                                 size, I915_GTT_PAGE_SIZE,
+                                 I915_COLOR_UNEVICTABLE,
                                  start, end, flags);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        if (ret)
@@ -254,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
        if (request > avail)
                goto no_enough_resource;
 
-       vgpu_aperture_sz(vgpu) = request;
+       vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
 
        item = "high GM space";
        max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
@@ -265,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
        if (request > avail)
                goto no_enough_resource;
 
-       vgpu_hidden_sz(vgpu) = request;
+       vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
 
        item = "fence";
        max = gvt_fence_sz(gvt) - HOST_FENCE;
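
The aperture_gm.c hunks above round the GM base, end, and requested sizes up to I915_GTT_PAGE_SIZE so that i915_gem_gtt_insert() always sees page-aligned bounds. As a rough, standalone illustration of the round-up the kernel's ALIGN() macro performs (assuming a 4 KiB GTT page; this is not GVT code):

#include <stdint.h>
#include <stdio.h>

/* Same power-of-two round-up that the kernel's ALIGN(x, a) performs. */
#define ALIGN_UP(x, a)  (((uint64_t)(x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t page = 4096;	/* assumed I915_GTT_PAGE_SIZE */

	/* 0x1001 rounds up to 0x2000; an already-aligned 0x2000 is unchanged. */
	printf("%#llx\n", (unsigned long long)ALIGN_UP(0x1001, page));
	printf("%#llx\n", (unsigned long long)ALIGN_UP(0x2000, page));
	return 0;
}
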
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 9a4b23c3ee97d5d87556d04bc0c5068fe6454eb1..7bb11a555b767fd5cabe1c1234e3c64ebe989f93 100644
@@ -1135,6 +1135,8 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
        u32 dword2 = cmd_val(s, 2);
        u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
 
+       info->plane = PRIMARY_PLANE;
+
        switch (plane) {
        case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
                info->pipe = PIPE_A;
@@ -1148,12 +1150,28 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
                info->pipe = PIPE_C;
                info->event = PRIMARY_C_FLIP_DONE;
                break;
+
+       case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
+               info->pipe = PIPE_A;
+               info->event = SPRITE_A_FLIP_DONE;
+               info->plane = SPRITE_PLANE;
+               break;
+       case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
+               info->pipe = PIPE_B;
+               info->event = SPRITE_B_FLIP_DONE;
+               info->plane = SPRITE_PLANE;
+               break;
+       case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
+               info->pipe = PIPE_C;
+               info->event = SPRITE_C_FLIP_DONE;
+               info->plane = SPRITE_PLANE;
+               break;
+
        default:
                gvt_err("unknown plane code %d\n", plane);
                return -EINVAL;
        }
 
-       info->pipe = PRIMARY_PLANE;
        info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
        info->tile_val = (dword1 & GENMASK(2, 0));
        info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 1a4430724069f44f89a5e1eab0a00d1955f592d2..6d8fde880c39936f816eae411320c38dac87662c 100644
@@ -333,3 +333,15 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu)
        else
                return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
 }
+
+/**
+ * intel_vgpu_reset_display - reset vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function resets the vGPU's virtual display emulation state.
+ *
+ */
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu)
+{
+       emulate_monitor_status_change(vgpu);
+}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 7a60cb8482687b92513f958b23eeaf8b46a1cb23..8b234ea961f67b96a185e3cd8bfd35728360a05d 100644
@@ -158,6 +158,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
 
 int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f32bb6f6495ce0aafddf35d920298c5d315af9bc..136c6e77561abc4b1a481385a376e89ab538f494 100644
@@ -515,7 +515,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-       if (wa_ctx->indirect_ctx.size == 0)
+       if (!wa_ctx->indirect_ctx.obj)
                return;
 
        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 47dec4acf7ff12951eb592e2b115953e961f6bdf..28c92346db0e4e3615c2b5c484b4421eeb097e6e 100644
@@ -606,21 +606,33 @@ struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
 static inline int init_shadow_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_shadow_page *p, int type)
 {
+       struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+       dma_addr_t daddr;
+
+       daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(kdev, daddr)) {
+               gvt_err("fail to map dma addr\n");
+               return -EINVAL;
+       }
+
        p->vaddr = page_address(p->page);
        p->type = type;
 
        INIT_HLIST_NODE(&p->node);
 
-       p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
-       if (p->mfn == INTEL_GVT_INVALID_ADDR)
-               return -EFAULT;
-
+       p->mfn = daddr >> GTT_PAGE_SHIFT;
        hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
        return 0;
 }
 
-static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+static inline void clean_shadow_page(struct intel_vgpu *vgpu,
+               struct intel_vgpu_shadow_page *p)
 {
+       struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+
+       dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+                       PCI_DMA_BIDIRECTIONAL);
+
        if (!hlist_unhashed(&p->node))
                hash_del(&p->node);
 }
@@ -670,7 +682,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
        trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
 
-       clean_shadow_page(&spt->shadow_page);
+       clean_shadow_page(spt->vgpu, &spt->shadow_page);
        intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
        list_del_init(&spt->post_shadow_list);
 
@@ -1875,8 +1887,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
        int page_entry_num = GTT_PAGE_SIZE >>
                                vgpu->gvt->device_info.gtt_entry_size_shift;
        void *scratch_pt;
-       unsigned long mfn;
        int i;
+       struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+       dma_addr_t daddr;
 
        if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
                return -EINVAL;
@@ -1887,16 +1900,18 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                return -ENOMEM;
        }
 
-       mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
-       if (mfn == INTEL_GVT_INVALID_ADDR) {
-               gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
-               free_page((unsigned long)scratch_pt);
-               return -EFAULT;
+       daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
+                       4096, PCI_DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(dev, daddr)) {
+               gvt_err("fail to dmamap scratch_pt\n");
+               __free_page(virt_to_page(scratch_pt));
+               return -ENOMEM;
        }
-       gtt->scratch_pt[type].page_mfn = mfn;
+       gtt->scratch_pt[type].page_mfn =
+               (unsigned long)(daddr >> GTT_PAGE_SHIFT);
        gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
        gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
-                       vgpu->id, type, mfn);
+                       vgpu->id, type, gtt->scratch_pt[type].page_mfn);
 
        /* Build the tree by full filled the scratch pt with the entries which
         * point to the next level scratch pt or scratch page. The
@@ -1930,9 +1945,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 {
        int i;
+       struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+       dma_addr_t daddr;
 
        for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
                if (vgpu->gtt.scratch_pt[i].page != NULL) {
+                       daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
+                                       GTT_PAGE_SHIFT);
+                       dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
                        __free_page(vgpu->gtt.scratch_pt[i].page);
                        vgpu->gtt.scratch_pt[i].page = NULL;
                        vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2192,6 +2212,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
        int ret;
        void *page;
+       struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+       dma_addr_t daddr;
 
        gvt_dbg_core("init gtt\n");
 
@@ -2209,14 +2231,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                gvt_err("fail to allocate scratch ggtt page\n");
                return -ENOMEM;
        }
-       gvt->gtt.scratch_ggtt_page = virt_to_page(page);
 
-       gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
-       if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
-               gvt_err("fail to translate scratch ggtt page\n");
-               __free_page(gvt->gtt.scratch_ggtt_page);
-               return -EFAULT;
+       daddr = dma_map_page(dev, virt_to_page(page), 0,
+                       4096, PCI_DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(dev, daddr)) {
+               gvt_err("fail to dmamap scratch ggtt page\n");
+               __free_page(virt_to_page(page));
+               return -ENOMEM;
        }
+       gvt->gtt.scratch_ggtt_page = virt_to_page(page);
+       gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
 
        if (enable_out_of_sync) {
                ret = setup_spt_oos(gvt);
@@ -2239,6 +2263,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+       struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+       dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
+                                       GTT_PAGE_SHIFT);
+
+       dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+
        __free_page(gvt->gtt.scratch_ggtt_page);
 
        if (enable_out_of_sync)
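
The gtt.c changes above carry the IOMMU fix: instead of asking the hypervisor for a machine frame number via intel_gvt_hypervisor_virt_to_mfn(), each shadow and scratch page is mapped through the DMA API, so the address programmed into GTT entries stays valid when the device sits behind an IOMMU. A minimal sketch of that map/unmap pattern, using a generic struct device and DMA_BIDIRECTIONAL (the generic spelling of the PCI_DMA_BIDIRECTIONAL flag used in the patch), not the exact GVT helpers:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map one page for device access and return a PFN-style handle, mirroring
 * how init_shadow_page()/alloc_scratch_pages() derive their "mfn" values
 * from the DMA address (daddr >> PAGE_SHIFT). */
static int map_page_for_gpu(struct device *dev, struct page *page,
			    unsigned long *pfn_out)
{
	dma_addr_t daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, daddr))
		return -ENOMEM;

	*pfn_out = (unsigned long)(daddr >> PAGE_SHIFT);
	return 0;
}

/* Reverse the shift to recover the DMA address before unmapping. */
static void unmap_page_for_gpu(struct device *dev, unsigned long pfn)
{
	dma_unmap_page(dev, (dma_addr_t)pfn << PAGE_SHIFT, PAGE_SIZE,
		       DMA_BIDIRECTIONAL);
}
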
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 9a636a2c20774e8a0021604b24799c62f251d35e..3b9d59e457ba7dbf2a1baffff7cd4f4c9aa75f3f 100644
@@ -75,13 +75,6 @@ int intel_gvt_init_host(void)
        if (xen_domain() && !xen_initial_domain())
                return -ENODEV;
 
-#ifdef CONFIG_INTEL_IOMMU
-       if (intel_iommu_gfx_mapped) {
-               gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
-               return -ENODEV;
-       }
-#endif
-
        /* Try to load MPT modules for hypervisors */
        if (xen_initial_domain()) {
                /* In Xen dom0 */
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index f7be02ac4be193040765c050649c169b5717f388..92bb247e3478606dc295d30c9e89315b95baf163 100644
@@ -176,26 +176,15 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
 {
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_irq_ops *ops = gvt->irq.ops;
-       u32 changed, masked, unmasked;
        u32 imr = *(u32 *)p_data;
 
-       gvt_dbg_irq("write IMR %x with val %x\n",
-               reg, imr);
-
-       gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
-
-       /* figure out newly masked/unmasked bits */
-       changed = vgpu_vreg(vgpu, reg) ^ imr;
-       masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
-       unmasked = masked ^ changed;
-
-       gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
-               changed, masked, unmasked);
+       gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n",
+                   reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr);
 
        vgpu_vreg(vgpu, reg) = imr;
 
        ops->check_pending_irq(vgpu);
-       gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+
        return 0;
 }
 
@@ -217,14 +206,11 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
 {
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_irq_ops *ops = gvt->irq.ops;
-       u32 changed, enabled, disabled;
        u32 ier = *(u32 *)p_data;
        u32 virtual_ier = vgpu_vreg(vgpu, reg);
 
-       gvt_dbg_irq("write master irq reg %x with val %x\n",
-               reg, ier);
-
-       gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+       gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n",
+                   reg, ier, virtual_ier, virtual_ier ^ ier);
 
        /*
         * GEN8_MASTER_IRQ is a special irq register,
@@ -236,16 +222,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
        vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
        vgpu_vreg(vgpu, reg) |= ier;
 
-       /* figure out newly enabled/disable bits */
-       changed = virtual_ier ^ ier;
-       enabled = (virtual_ier & changed) ^ changed;
-       disabled = enabled ^ changed;
-
-       gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
-                       changed, enabled, disabled);
-
        ops->check_pending_irq(vgpu);
-       gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+
        return 0;
 }
 
@@ -268,21 +246,11 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_irq_ops *ops = gvt->irq.ops;
        struct intel_gvt_irq_info *info;
-       u32 changed, enabled, disabled;
        u32 ier = *(u32 *)p_data;
 
-       gvt_dbg_irq("write IER %x with val %x\n",
-               reg, ier);
-
-       gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
+       gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n",
+                   reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier);
 
-       /* figure out newly enabled/disable bits */
-       changed = vgpu_vreg(vgpu, reg) ^ ier;
-       enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
-       disabled = enabled ^ changed;
-
-       gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
-                       changed, enabled, disabled);
        vgpu_vreg(vgpu, reg) = ier;
 
        info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
@@ -293,7 +261,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
                update_upstream_irq(vgpu, info);
 
        ops->check_pending_irq(vgpu);
-       gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+
        return 0;
 }
 
@@ -317,7 +285,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
                iir_to_regbase(reg));
        u32 iir = *(u32 *)p_data;
 
-       gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+       gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n",
+                   reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir);
 
        if (WARN_ON(!info))
                return -EINVAL;
@@ -619,6 +588,10 @@ static void gen8_init_irq(
                SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
                SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
                SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+               SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+               SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+               SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
        }
 
        /* GEN8 interrupt PCU events */
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 080ca77abd22b41f3a561bb8b024c4616b064759..10c3a4b95a9229938eb3b0a3cdfda54355687296 100644
@@ -77,7 +77,7 @@ struct kvmgt_guest_info {
 struct gvt_dma {
        struct rb_node node;
        gfn_t gfn;
-       kvm_pfn_t pfn;
+       unsigned long iova;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 
+static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
+               unsigned long *iova)
+{
+       struct page *page;
+       struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+       dma_addr_t daddr;
+
+       page = pfn_to_page(pfn);
+       if (is_error_page(page))
+               return -EFAULT;
+
+       daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
+                       PCI_DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(dev, daddr))
+               return -ENOMEM;
+
+       *iova = (unsigned long)(daddr >> PAGE_SHIFT);
+       return 0;
+}
+
+static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+{
+       struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+       dma_addr_t daddr;
+
+       daddr = (dma_addr_t)(iova << PAGE_SHIFT);
+       dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
 static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
        struct rb_node *node = vgpu->vdev.cache.rb_node;
@@ -111,21 +140,22 @@ out:
        return ret;
 }
 
-static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
        struct gvt_dma *entry;
-       kvm_pfn_t pfn;
+       unsigned long iova;
 
        mutex_lock(&vgpu->vdev.cache_lock);
 
        entry = __gvt_cache_find(vgpu, gfn);
-       pfn = (entry == NULL) ? 0 : entry->pfn;
+       iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
 
        mutex_unlock(&vgpu->vdev.cache_lock);
-       return pfn;
+       return iova;
 }
 
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+               unsigned long iova)
 {
        struct gvt_dma *new, *itr;
        struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
@@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
                return;
 
        new->gfn = gfn;
-       new->pfn = pfn;
+       new->iova = iova;
 
        mutex_lock(&vgpu->vdev.cache_lock);
        while (*link) {
@@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
        }
 
        g1 = gfn;
+       gvt_dma_unmap_iova(vgpu, this->iova);
        rc = vfio_unpin_pages(dev, &g1, 1);
        WARN_ON(rc != 1);
        __gvt_cache_remove_entry(vgpu, this);
@@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
        mutex_lock(&vgpu->vdev.cache_lock);
        while ((node = rb_first(&vgpu->vdev.cache))) {
                dma = rb_entry(node, struct gvt_dma, node);
+               gvt_dma_unmap_iova(vgpu, dma->iova);
                gfn = dma->gfn;
 
                vfio_unpin_pages(dev, &gfn, 1);
@@ -965,11 +997,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                        sparse->areas[0].offset =
                                        PAGE_ALIGN(vgpu_aperture_offset(vgpu));
                        sparse->areas[0].size = vgpu_aperture_sz(vgpu);
-                       if (!caps.buf) {
-                               kfree(caps.buf);
-                               caps.buf = NULL;
-                               caps.size = 0;
-                       }
                        break;
 
                case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
@@ -1353,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 
 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 {
-       unsigned long pfn;
+       unsigned long iova, pfn;
        struct kvmgt_guest_info *info;
        struct device *dev;
        int rc;
@@ -1362,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
                return INTEL_GVT_INVALID_ADDR;
 
        info = (struct kvmgt_guest_info *)handle;
-       pfn = gvt_cache_find(info->vgpu, gfn);
-       if (pfn != 0)
-               return pfn;
+       iova = gvt_cache_find(info->vgpu, gfn);
+       if (iova != INTEL_GVT_INVALID_ADDR)
+               return iova;
 
        pfn = INTEL_GVT_INVALID_ADDR;
        dev = mdev_dev(info->vgpu->vdev.mdev);
@@ -1373,9 +1400,16 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
                gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
                return INTEL_GVT_INVALID_ADDR;
        }
+       /* transfer to host iova for GFX to use DMA */
+       rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
+       if (rc) {
+               gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+               vfio_unpin_pages(dev, &gfn, 1);
+               return INTEL_GVT_INVALID_ADDR;
+       }
 
-       gvt_cache_add(info->vgpu, gfn, pfn);
-       return pfn;
+       gvt_cache_add(info->vgpu, gfn, iova);
+       return iova;
 }
 
 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
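
With the kvmgt.c changes above, the per-vGPU rb-tree cache stores a host IOVA rather than a raw PFN: on a miss the guest page is pinned through VFIO, DMA-mapped via gvt_dma_map_iova(), and the resulting IOVA is cached so later lookups skip both steps; teardown unmaps before unpinning. A condensed, hypothetical sketch of that lookup flow (the real code is kvmgt_gfn_to_pfn() above, shown here without locking or the mdev plumbing):

/* Hypothetical condensation of the post-patch gfn lookup path. */
static unsigned long gfn_to_iova(struct kvmgt_guest_info *info,
				 struct device *dev, unsigned long gfn)
{
	unsigned long iova, pfn;

	iova = gvt_cache_find(info->vgpu, gfn);		/* 1. cache hit? */
	if (iova != INTEL_GVT_INVALID_ADDR)
		return iova;

	if (vfio_pin_pages(dev, &gfn, 1,		/* 2. pin the guest page */
			   IOMMU_READ | IOMMU_WRITE, &pfn) != 1)
		return INTEL_GVT_INVALID_ADDR;

	if (gvt_dma_map_iova(info->vgpu, pfn, &iova)) {	/* 3. map it for DMA */
		vfio_unpin_pages(dev, &gfn, 1);
		return INTEL_GVT_INVALID_ADDR;
	}

	gvt_cache_add(info->vgpu, gfn, iova);		/* 4. remember the IOVA */
	return iova;
}
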
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 44136b1f3aabc78697d6c885a8b9c67a74ebe003..2b3a642284b6da67f8f5d821256314d896799298 100644
@@ -236,12 +236,18 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
        }
 }
 
+#define CTX_CONTEXT_CONTROL_VAL        0x03
+
 void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct render_mmio *mmio;
        u32 v;
        int i, array_size;
+       u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
+       u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+       u32 inhibit_mask =
+               _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
        if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
                mmio = gen9_render_mmio_list;
@@ -257,6 +263,17 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
                        continue;
 
                mmio->value = I915_READ(mmio->reg);
+
+               /*
+                * if it is an inhibit context, load in_context mmio
+                * into HW by mmio write. If it is not, skip this mmio
+                * write.
+                */
+               if (mmio->in_context &&
+                               ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
+                               i915.enable_execlists)
+                       continue;
+
                if (mmio->mask)
                        v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
                else
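
The render.c change only writes "in-context" MMIO registers to hardware when the shadow context is an inhibit context, that is, when CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT is set in its CONTEXT_CONTROL image; otherwise hardware restores those registers from the context itself and the write is skipped. i915 masked registers keep the value in the low 16 bits and a write-enable mask in the high 16 bits, which is why the test compares against the full _MASKED_BIT_ENABLE() pattern. A rough standalone illustration, with an assumed bit position:

#include <stdint.h>
#include <stdio.h>

/* i915-style masked bit: (bit << 16) | bit means "set this bit";
 * the upper half marks which bits the write actually touches. */
#define MASKED_BIT_ENABLE(bit)	(((uint32_t)(bit) << 16) | (uint32_t)(bit))
#define CTX_RESTORE_INHIBIT	(1u << 0)	/* assumed bit, for illustration */

int main(void)
{
	uint32_t inhibit = MASKED_BIT_ENABLE(CTX_RESTORE_INHIBIT);
	uint32_t ctx_ctrl = 0x00010000;		/* example register image */

	/* Same shape as the check added above: only an exact match means
	 * the context really is an inhibit context. */
	if ((ctx_ctrl & inhibit) != inhibit)
		printf("not an inhibit context: skip in-context MMIO writes\n");
	return 0;
}
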
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 678b0be853763ab4b0381a9a4f25880ea075c7a3..06c9584ac5f0333c28d628d797686b82d8f82806 100644
@@ -125,7 +125,6 @@ static void tbs_sched_func(struct work_struct *work)
                vgpu_data = scheduler->current_vgpu->sched_data;
                head = &vgpu_data->list;
        } else {
-               gvt_dbg_sched("no current vgpu search from q head\n");
                head = &sched_data->runq_head;
        }
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 7ea68a75dc4676b0074c6340ec29b698e2181e4a..d6b6d0efdd1aeef15463e9504a4054ff3f2c3f8f 100644
@@ -169,7 +169,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);
 
-       shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+       shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+       shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
        mutex_lock(&dev_priv->drm.struct_mutex);
@@ -456,7 +457,7 @@ static int workload_thread(void *priv)
                }
 
 complete:
-               gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+               gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);
 
                if (workload->req)
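
The scheduler.c fix above replaces a plain assignment of shadow_ctx->desc_template with a clear-then-set update, so bits outside the two-bit addressing-mode field survive each workload dispatch. The general read-modify-write pattern, with hypothetical field names and an assumed shift:

#include <stdint.h>

#define ADDR_MODE_SHIFT	3			/* assumed shift, for illustration */
#define ADDR_MODE_MASK	(0x3u << ADDR_MODE_SHIFT)

/* Clear only the field, then OR in the new value, instead of
 * overwriting the whole descriptor word. */
static inline uint32_t set_addr_mode(uint32_t desc, uint32_t mode)
{
	desc &= ~ADDR_MODE_MASK;
	desc |= (mode & 0x3u) << ADDR_MODE_SHIFT;
	return desc;
}
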
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 67d471cee79ebdd6ce12b492e894ef9600689649..95a97aa0051e787430fff4266be7ac559973b78f 100644
@@ -385,6 +385,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                intel_vgpu_reset_resource(vgpu);
                intel_vgpu_reset_mmio(vgpu);
                populate_pvinfo_page(vgpu);
+               intel_vgpu_reset_display(vgpu);
 
                if (dmlr)
                        intel_vgpu_reset_cfg_space(vgpu);