drm/i915/gvt: GVT pin/unpin shadow context
author    Chuanxiao Dong <chuanxiao.dong@intel.com>
          Thu, 16 Mar 2017 01:47:58 +0000 (09:47 +0800)
committer Zhenyu Wang <zhenyuw@linux.intel.com>
          Fri, 17 Mar 2017 08:46:45 +0000 (16:46 +0800)
When handling a guest request, GVT needs to populate/update shadow_ctx
with the guest context. This requires the shadow_ctx to stay pinned.
The current implementation relies on the i915 request allocation to pin
it, but that cannot guarantee i915 will not unpin the shadow_ctx while
GVT is updating the guest context from shadow_ctx. So GVT should
pin/unpin the shadow_ctx itself.

Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
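
In outline: GVT takes its own pin before allocating the request, drops it on
the request-allocation failure path, and otherwise holds it until the guest
context has been written back at workload completion. The following is a
minimal sketch of that lifecycle, not the exact driver code: names follow the
hunks below, while locking, submission, and the dispatch()/complete() wrappers
are illustrative stand-ins for dispatch_workload()/complete_current_workload().

    /*
     * Minimal sketch of the pin/unpin lifecycle this patch introduces.
     * Names follow the hunks below; locking, submission, and most error
     * handling are abbreviated.
     */
    static int dispatch(struct intel_vgpu_workload *workload)
    {
            struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
            struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
            struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
            struct drm_i915_gem_request *rq;
            int ret;

            ret = engine->context_pin(engine, shadow_ctx);   /* GVT's own pin */
            if (ret)
                    return ret;

            rq = i915_gem_request_alloc(engine, shadow_ctx); /* i915 pins too */
            if (IS_ERR(rq)) {
                    engine->context_unpin(engine, shadow_ctx); /* balance on failure */
                    return PTR_ERR(rq);
            }

            /* ... populate shadow_ctx and submit rq ... */
            return 0;                    /* pin held until completion */
    }

    static void complete(struct intel_vgpu_workload *workload)
    {
            struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
            struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];

            /* ... copy shadow_ctx back into the guest context ... */
            engine->context_unpin(engine, workload->vgpu->shadow_ctx); /* drop GVT's pin */
    }

Holding a second, GVT-owned pin is the design choice here: the pin taken by
i915_gem_request_alloc() can be released as soon as the request retires, which
may be before GVT has copied the shadow context back to the guest.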
drivers/gpu/drm/i915/gvt/scheduler.c

index 907e6bc794f61b732c7f8238ea1d5d6f40e1dacb..39a83eb7aeccaf42a574220dd5e659c3a4d298cf 100644 (file)
@@ -175,6 +175,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        int ret;
@@ -188,6 +189,21 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
+       /* Pin the shadow context by GVT even though it will also be pinned
+        * when i915 allocates a request. This is because GVT will update the
+        * guest context from the shadow context when the workload completes,
+        * and at that moment i915 may already have unpinned the shadow
+        * context, leaving the shadow_ctx pages invalid. So GVT needs to pin
+        * it itself; after the guest context is updated, GVT can unpin the
+        * shadow_ctx safely.
+        */
+       ret = engine->context_pin(engine, shadow_ctx);
+       if (ret) {
+               gvt_vgpu_err("fail to pin shadow context\n");
+               workload->status = ret;
+               mutex_unlock(&dev_priv->drm.struct_mutex);
+               return ret;
+       }
+
        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
@@ -231,6 +247,9 @@ out:
 
        if (!IS_ERR_OR_NULL(rq))
                i915_add_request_no_flush(rq);
+       else
+               engine->context_unpin(engine, shadow_ctx);
+
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
 }
@@ -380,6 +399,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
         * For the workload w/o request, directly complete the workload.
         */
        if (workload->req) {
+               struct drm_i915_private *dev_priv =
+                       workload->vgpu->gvt->dev_priv;
+               struct intel_engine_cs *engine =
+                       dev_priv->engine[workload->ring_id];
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));
 
@@ -392,6 +415,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
+               mutex_lock(&dev_priv->drm.struct_mutex);
+               /* unpin the shadow ctx now that the shadow_ctx update is done */
+               engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
        }
 
        gvt_dbg_sched("ring id %d complete workload %p status %d\n",