/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */
#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"
#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)
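
/*
 * The PDP root pointers live in the ring context as UDW/LDW MMIO pairs
 * ordered from PDP3 down to PDP0, so the eight 32-bit values of the
 * shadow page table are written in reverse order.
 */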
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}
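
/*
 * Copy the guest's ring context into the shadow context image: the bulk
 * context pages are read from guest memory through the hypervisor,
 * selected MMIO fields of the state page are copied individually, and
 * the PDP root pointers are replaced with the shadow page table.
 */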
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	/* The render context on Broadwell is always 19 pages. */
	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	/* The ring context state page is populated field by field below;
	 * bulk-copy the remaining context pages from the guest.
	 */
	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EINVAL;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
			workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
	return 0;
}
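
/* GVT-g requests run in a context that forces single submission. */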
static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
	return i915_gem_context_force_single_submission(req->ctx);
}
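
/*
 * Context status notifier: on schedule-in/out of a request, switch the
 * engine's MMIO context between host and vGPU owners and track whether
 * the shadow context is currently active on the hardware.
 */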
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;

	if (!is_gvt_request(req)) {
		spin_lock_bh(&scheduler->mmio_context_lock);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_bh(&scheduler->mmio_context_lock);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_bh(&scheduler->mmio_context_lock);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_bh(&scheduler->mmio_context_lock);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}
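
/*
 * Submit a vGPU workload to i915: pin the shadow context, allocate a
 * request, scan and shadow the guest ring buffer (and the indirect
 * context on RCS), populate the shadow context image, then run any
 * prepare callback before the request is added.
 */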
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct drm_i915_gem_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_ring *ring;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				     GEN8_CTX_ADDRESSING_MODE_SHIFT;

	mutex_lock(&dev_priv->drm.struct_mutex);

	/* Pin the shadow context by GVT even though it will also be pinned
	 * when i915 allocates the request: GVT updates the guest context
	 * from the shadow context after the workload completes, and by that
	 * time i915 may already have unpinned the shadow context, making
	 * the shadow_ctx pages invalid. So GVT pins it itself and unpins it
	 * only after the guest context has been updated.
	 */
	ring = engine->context_pin(engine, shadow_ctx);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
		workload->status = ret;
		mutex_unlock(&dev_priv->drm.struct_mutex);
		return ret;
	}

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto out;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_gem_request_get(rq);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto out;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto out;

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto out;
	}

	gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
		ring_id, workload->req);

	ret = 0;
	workload->dispatched = true;
out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(rq))
		i915_add_request(rq);
	else
		engine->context_unpin(engine, shadow_ctx);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}
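
/*
 * Pick the next workload for a ring from the current vGPU's queue.
 * Returns NULL if there is no current vGPU, a reschedule is pending, or
 * the queue is empty; an unfinished current workload is returned again
 * so that a failed dispatch can be resubmitted.
 */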
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have current workload, maybe the workload dispatcher
	 * failed to submit it for some reason, resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as current workload
	 * once current workload is set, schedule policy routines
	 * will wait until the current workload is finished when trying
	 * to schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}
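
/*
 * Mirror of populate_shadow_context: after a workload completes, write
 * the shadow context image back into the guest's ring context through
 * the hypervisor, including the final ring header (tail) value.
 */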
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	/* The render context on Broadwell is always 19 pages. */
	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_timestamp);
#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}
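
/*
 * Retire the current workload on a ring: wait until the shadow context
 * is scheduled out, propagate -EIO for a request that hung the GPU,
 * copy results back to the guest unless the engine is being reset, and
 * deliver any pending virtual events before unpinning the shadow
 * context.
 */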
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload;
	struct intel_vgpu *vgpu;
	int event;

	mutex_lock(&gvt->lock);

	workload = scheduler->current_workload[ring_id];
	vgpu = workload->vgpu;

	/* For a workload with a request, wait for the context switch so the
	 * request is sure to have completed.
	 * For a workload without a request, complete it directly.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Propagate -EIO to the workload status so
		 * that a hung request does not trigger a context switch
		 * interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin shadow ctx as the shadow_ctx update is done */
		engine->context_unpin(engine, workload->vgpu->shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);
	workload->complete(workload);

	atomic_dec(&vgpu->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->lock);
}
struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};
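
/*
 * Per-ring scheduler thread: sleeps until a workload is available,
 * dispatches it under the gvt lock (holding forcewake on SKL/KBL),
 * waits for its request to complete and then retires it.
 */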
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);
	}
	return 0;
}
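
/**
 * intel_gvt_wait_vgpu_idle - wait until all running workloads of a vGPU retire
 * @vgpu: a vGPU
 */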
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&vgpu->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&vgpu->running_workload_num));
	}
}
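
/**
 * intel_gvt_clean_workload_scheduler - unregister the context status
 * notifiers and stop the per-engine workload threads
 * @gvt: the GVT device
 */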
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}
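
/**
 * intel_gvt_init_workload_scheduler - start one workload thread per engine
 * and register the context status notifiers
 * @gvt: the GVT device
 *
 * Returns 0 on success, a negative error code on failure.
 */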
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}
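
/**
 * intel_vgpu_clean_gvt_context - release the vGPU's shadow GEM context
 * @vgpu: a vGPU
 */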
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}
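
/**
 * intel_vgpu_init_gvt_context - create the shadow GEM context for a vGPU
 * @vgpu: a vGPU
 *
 * Returns 0 on success, a negative error code on failure.
 */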
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
	atomic_set(&vgpu->running_workload_num, 0);

	vgpu->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(vgpu->shadow_ctx))
		return PTR_ERR(vgpu->shadow_ctx);

	vgpu->shadow_ctx->engine[RCS].initialised = true;

	return 0;
}