drivers/gpu/drm/i915/gvt/scheduler.c
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 * Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 * Ping Gao <ping.a.gao@intel.com>
 * Tina Zhang <tina.zhang@intel.com>
 * Changbin Du <changbin.du@intel.com>
 * Min He <min.he@intel.com>
 * Bing Niu <bing.niu@intel.com>
 * Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

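/*
 * The ring context stores the PDP root pointers as pdp3_UDW down to
 * pdp0_LDW, so the eight 32-bit values in @pdp are written in reverse
 * order.
 */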
static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;

        for (i = 0; i < 8; i++)
                pdp_pair[i].val = pdp[7 - i];
}

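/*
 * Copy the guest's logical ring context into the shadow context: the
 * per-engine context pages are read from guest memory through the
 * hypervisor interface, then selected ring-context registers and the
 * PDP root pointers are filled in from the shadow page table.
 */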
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EINVAL;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

        if (ring_id == RCS) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG

        set_context_pdp_root_pointer(shadow_ring_context,
                        workload->shadow_mm->shadow_page_table);

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
        return 0;
}

static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
        return i915_gem_context_force_single_submission(req->ctx);
}

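/*
 * Context status notifier: called by i915 on context schedule in/out for
 * each engine.  It switches the per-engine MMIO state between host and
 * vGPU owners and updates shadow_ctx_active so that workload completion
 * can wait for the context switch.
 */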
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                shadow_ctx_notifier_block[req->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = req->engine->id;
        struct intel_vgpu_workload *workload;
        unsigned long flags;

        if (!is_gvt_request(req)) {
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              NULL, ring_id);
                        scheduler->engine_owner[ring_id] = NULL;
                }
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

                return NOTIFY_OK;
        }

        workload = scheduler->current_workload[ring_id];
        if (unlikely(!workload))
                return NOTIFY_OK;

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              workload->vgpu, ring_id);
                        scheduler->engine_owner[ring_id] = workload->vgpu;
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
        case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }
        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}

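/*
 * Refresh the low 12 bits of the cached LRC descriptor (the GEN8_CTX_*
 * flags) from the context's desc_template, which may have been changed
 * to match the guest's addressing mode.
 */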
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
                struct intel_engine_cs *engine)
{
        struct intel_context *ce = &ctx->engine[engine->id];
        u64 desc = 0;

        desc = ce->lrc_desc;

        /* Update bits 0-11 of the context descriptor which includes flags
         * like GEN8_CTX_* cached in desc_template
         */
        desc &= U64_MAX << 12;
        desc |= ctx->desc_template & ((1ULL << 12) - 1);

        ce->lrc_desc = desc;
}

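/*
 * Reserve space in the shadow request's ring and copy the scanned guest
 * ring-buffer contents into it, so the commands are emitted through the
 * shadow context's own ring.
 */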
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        void *shadow_ring_buffer_va;
        u32 *cs;

        /* allocate shadow ring buffer */
        cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
        if (IS_ERR(cs)) {
                gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
                        workload->rb_len);
                return PTR_ERR(cs);
        }

        shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

        /* get shadow ring buffer va */
        workload->shadow_ring_buffer_va = cs;

        memcpy(cs, shadow_ring_buffer_va,
                        workload->rb_len);

        cs += workload->rb_len / sizeof(u32);
        intel_ring_advance(workload->req, cs);

        return 0;
}

void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        if (!wa_ctx->indirect_ctx.obj)
                return;

        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_ring *ring;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (workload->shadowed)
                return 0;

        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                     GEN8_CTX_ADDRESSING_MODE_SHIFT;

        if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
                shadow_context_descriptor_update(shadow_ctx,
                                        dev_priv->engine[ring_id]);

        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
        if (ret)
                goto err_scan;

        if ((workload->ring_id == RCS) &&
            (workload->wa_ctx.indirect_ctx.size != 0)) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto err_scan;
        }

        /* Pin the shadow context by GVT even though it will also be pinned
         * when i915 allocates the request.  GVT updates the guest context
         * from the shadow context when the workload is completed, and at
         * that moment i915 may already have unpinned the shadow context,
         * making the shadow_ctx pages invalid.  So GVT needs its own pin.
         * After updating the guest context, GVT can unpin the shadow_ctx
         * safely.
         */
        ring = engine->context_pin(engine, shadow_ctx);
        if (IS_ERR(ring)) {
                ret = PTR_ERR(ring);
                gvt_vgpu_err("fail to pin shadow context\n");
                goto err_shadow;
        }

        ret = populate_shadow_context(workload);
        if (ret)
                goto err_unpin;
        workload->shadowed = true;
        return 0;

err_unpin:
        engine->context_unpin(engine, shadow_ctx);
err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
        return ret;
}

int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
        int ret;

        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
                goto err_unpin;
        }

        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

        workload->req = i915_gem_request_get(rq);
        ret = copy_workload_to_ring_buffer(workload);
        if (ret)
                goto err_unpin;
        return 0;

err_unpin:
        engine->context_unpin(engine, shadow_ctx);
        release_shadow_wa_ctx(&workload->wa_ctx);
        return ret;
}

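/*
 * Shadow the workload, run its prepare callback and submit the generated
 * request (if any) to i915.  Called with gvt->lock held; takes
 * struct_mutex for the i915 operations.
 */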
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        int ret = 0;

        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);

        mutex_lock(&dev_priv->drm.struct_mutex);

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret) {
                        engine->context_unpin(engine, shadow_ctx);
                        goto out;
                }
        }

out:
        if (ret)
                workload->status = ret;

        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                        ring_id, workload->req);
                i915_add_request(workload->req);
                workload->dispatched = true;
        }

        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}

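/*
 * Pick the next workload for a ring from the current vGPU's queue and make
 * it the current workload.  Returns NULL if there is no current vGPU, a
 * reschedule is pending, or the queue is empty.
 */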
static struct intel_vgpu_workload *pick_next_workload(
                struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
                goto out;
        }

        if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;

        /*
         * still have a current workload; maybe the workload dispatcher
         * failed to submit it for some reason, so resubmit it.
         */
        if (scheduler->current_workload[ring_id]) {
                workload = scheduler->current_workload[ring_id];
                gvt_dbg_sched("ring id %d still have current workload %p\n",
                                ring_id, workload);
                goto out;
        }

        /*
         * pick a workload as the current workload
         * once the current workload is set, the scheduling policy routines
         * will wait until the current workload is finished when trying to
         * schedule out a vgpu.
         */
        scheduler->current_workload[ring_id] = container_of(
                        workload_q_head(scheduler->current_vgpu, ring_id)->next,
                        struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[ring_id];

        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

        atomic_inc(&workload->vgpu->running_workload_num);
out:
        mutex_unlock(&gvt->lock);
        return workload;
}

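/*
 * Write the shadow context back to the guest's logical ring context after
 * the workload has completed: the context pages are copied to guest memory
 * through the hypervisor interface, and the ring header and selected
 * ring-context registers are updated.
 */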
static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
}

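/*
 * Complete the current workload on a ring: wait for the shadow context to
 * be scheduled out, propagate the request's fence error, copy the shadow
 * context back to the guest and deliver any pending virtual events, then
 * hand the workload back to its completion callback.
 */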
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
        struct intel_vgpu *vgpu;
        int event;

        mutex_lock(&gvt->lock);

        workload = scheduler->current_workload[ring_id];
        vgpu = workload->vgpu;

        /* For a workload with a request, we need to wait for the context
         * switch to make sure the request is completed.
         * For a workload without a request, complete it directly.
         */
        if (workload->req) {
                struct drm_i915_private *dev_priv =
                        workload->vgpu->gvt->dev_priv;
                struct intel_engine_cs *engine =
                        dev_priv->engine[workload->ring_id];
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                /* If this request caused a GPU hang, req->fence.error will
                 * be set to -EIO.  Use -EIO as the workload status so that,
                 * when this request caused a GPU hang, we don't trigger a
                 * context switch interrupt to the guest.
                 */
                if (likely(workload->status == -EINPROGRESS)) {
                        if (workload->req->fence.error == -EIO)
                                workload->status = -EIO;
                        else
                                workload->status = 0;
                }

                i915_gem_request_put(fetch_and_zero(&workload->req));

                if (!workload->status && !(vgpu->resetting_eng &
                                           ENGINE_MASK(ring_id))) {
                        update_guest_context(workload);

                        for_each_set_bit(event, workload->pending_events,
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
                mutex_lock(&dev_priv->drm.struct_mutex);
                /* unpin shadow ctx as the shadow_ctx update is done */
                engine->context_unpin(engine, workload->vgpu->shadow_ctx);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                        ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        list_del_init(&workload->list);
        workload->complete(workload);

        atomic_dec(&vgpu->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);

        if (gvt->scheduler.need_reschedule)
                intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

        mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
        struct intel_gvt *gvt;
        int ring_id;
};

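/*
 * Per-ring worker thread: pick workloads, dispatch them to i915 and wait
 * for completion, taking a runtime PM reference (and forcewake where
 * needed) around each dispatch.
 */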
static int workload_thread(void *priv)
{
        struct workload_thread_param *p = (struct workload_thread_param *)priv;
        struct intel_gvt *gvt = p->gvt;
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
                        || IS_KABYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        kfree(p);

        gvt_dbg_core("workload thread for ring %d started\n", ring_id);

        while (!kthread_should_stop()) {
                add_wait_queue(&scheduler->waitq[ring_id], &wait);
                do {
                        workload = pick_next_workload(gvt, ring_id);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[ring_id], &wait);

                if (!workload)
                        break;

                gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
                                workload->ring_id, workload,
                                workload->vgpu->id);

                intel_runtime_pm_get(gvt->dev_priv);

                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                workload->ring_id, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
                mutex_unlock(&gvt->lock);

                if (ret) {
                        vgpu = workload->vgpu;
                        gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }

                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);
                i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);

                complete_current_workload(gvt, ring_id);

                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                intel_runtime_pm_put(gvt->dev_priv);
        }
        return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&vgpu->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                           !atomic_read(&vgpu->running_workload_num));
        }
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;

        gvt_dbg_core("clean workload scheduler\n");

        for_each_engine(engine, gvt->dev_priv, i) {
                atomic_notifier_chain_unregister(
                                &engine->context_status_notifier,
                                &gvt->shadow_ctx_notifier_block[i]);
                kthread_stop(scheduler->thread[i]);
        }
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;
        int ret;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for_each_engine(engine, gvt->dev_priv, i) {
                init_waitqueue_head(&scheduler->waitq[i]);

                param = kzalloc(sizeof(*param), GFP_KERNEL);
                if (!param) {
                        ret = -ENOMEM;
                        goto err;
                }

                param->gvt = gvt;
                param->ring_id = i;

                scheduler->thread[i] = kthread_run(workload_thread, param,
                        "gvt workload %d", i);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }

                gvt->shadow_ctx_notifier_block[i].notifier_call =
                        shadow_context_status_change;
                atomic_notifier_chain_register(&engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
        }
        return 0;
err:
        intel_gvt_clean_workload_scheduler(gvt);
        kfree(param);
        param = NULL;
        return ret;
}

void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
        i915_gem_context_put(vgpu->shadow_ctx);
}

int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
        atomic_set(&vgpu->running_workload_num, 0);

        vgpu->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
        if (IS_ERR(vgpu->shadow_ctx))
                return PTR_ERR(vgpu->shadow_ctx);

        vgpu->shadow_ctx->engine[RCS].initialised = true;

        bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);

        return 0;
}