/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

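/*
 * Write the guest page-directory-pointer dwords into the PDP0-3 UDW/LDW
 * slots of the shadow ring context. The slots are laid out starting at
 * pdp3_UDW, so the pdp[] array is written in reverse order.
 */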
static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;

        for (i = 0; i < 8; i++)
                pdp_pair[i].val = pdp[7 - i];
}

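/*
 * Populate the shadow context with the guest context image: copy the guest
 * context pages through the hypervisor GPA interface, copy the ring context
 * registers of interest and point the PDP roots at the shadow page tables.
 */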
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                I915_GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EINVAL;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                I915_GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

        if (ring_id == RCS) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG

        set_context_pdp_root_pointer(shadow_ring_context,
                        workload->shadow_mm->shadow_page_table);

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
        return 0;
}

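/* A request was submitted by GVT if its context forces single submission. */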
static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
        return i915_gem_context_force_single_submission(req->ctx);
}

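/*
 * Snapshot INSTDONE, ACTHD and ACTHD_UDW of the ring into the vGPU's
 * virtual registers, so the guest reads back up-to-date values.
 */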
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
        i915_reg_t reg;

        reg = RING_INSTDONE(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
        reg = RING_ACTHD(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
        reg = RING_ACTHD_UDW(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}

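/*
 * Context status notifier: on schedule-in/out/preemption events, switch the
 * engine MMIO state between host and vGPU owners and track shadow context
 * activity for the current workload.
 */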
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                shadow_ctx_notifier_block[req->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = req->engine->id;
        struct intel_vgpu_workload *workload;
        unsigned long flags;

        if (!is_gvt_request(req)) {
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              NULL, ring_id);
                        scheduler->engine_owner[ring_id] = NULL;
                }
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

                return NOTIFY_OK;
        }

        workload = scheduler->current_workload[ring_id];
        if (unlikely(!workload))
                return NOTIFY_OK;

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              workload->vgpu, ring_id);
                        scheduler->engine_owner[ring_id] = workload->vgpu;
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
                save_ring_hw_state(workload->vgpu, ring_id);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
                save_ring_hw_state(workload->vgpu, ring_id);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }
        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}

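/*
 * Keep the low 12 flag bits of the cached LRC descriptor in sync with the
 * context's desc_template.
 */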
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
                struct intel_engine_cs *engine)
{
        struct intel_context *ce = &ctx->engine[engine->id];
        u64 desc = 0;

        desc = ce->lrc_desc;

        /* Update bits 0-11 of the context descriptor which includes flags
         * like GEN8_CTX_* cached in desc_template
         */
        desc &= U64_MAX << 12;
        desc |= ctx->desc_template & ((1ULL << 12) - 1);

        ce->lrc_desc = desc;
}

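/*
 * Copy the scanned guest ring buffer contents into the shadow context's ring
 * buffer, reserving space with intel_ring_begin() on the workload's request.
 */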
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        void *shadow_ring_buffer_va;
        u32 *cs;

        /* allocate shadow ring buffer */
        cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
        if (IS_ERR(cs)) {
                gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
                        workload->rb_len);
                return PTR_ERR(cs);
        }

        shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

        /* get shadow ring buffer va */
        workload->shadow_ring_buffer_va = cs;

        memcpy(cs, shadow_ring_buffer_va,
                        workload->rb_len);

        cs += workload->rb_len / sizeof(u32);
        intel_ring_advance(workload->req, cs);

        return 0;
}

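/* Drop the pin and reference on the shadow indirect (wa) context, if any. */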
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        if (!wa_ctx->indirect_ctx.obj)
                return;

        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_ring *ring;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (workload->shadowed)
                return 0;

        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                     GEN8_CTX_ADDRESSING_MODE_SHIFT;

        if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
                shadow_context_descriptor_update(shadow_ctx,
                                        dev_priv->engine[ring_id]);

        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
        if (ret)
                goto err_scan;

        if ((workload->ring_id == RCS) &&
            (workload->wa_ctx.indirect_ctx.size != 0)) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto err_scan;
        }

        /* Pin the shadow context by GVT even though it will also be pinned
         * when i915 allocates the request. GVT updates the guest context
         * from the shadow context when the workload is completed, and at
         * that moment i915 may already have unpinned the shadow context,
         * making the shadow_ctx pages invalid. So GVT needs its own pin;
         * after updating the guest context, GVT can unpin the shadow_ctx
         * safely.
         */
        ring = engine->context_pin(engine, shadow_ctx);
        if (IS_ERR(ring)) {
                ret = PTR_ERR(ring);
                gvt_vgpu_err("fail to pin shadow context\n");
                goto err_shadow;
        }

        ret = populate_shadow_context(workload);
        if (ret)
                goto err_unpin;
        workload->shadowed = true;
        return 0;

err_unpin:
        engine->context_unpin(engine, shadow_ctx);
err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
        return ret;
}

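/*
 * Allocate an i915 request on the shadow context and copy the shadowed ring
 * buffer contents into it. On failure, the shadow context pin and the wa_ctx
 * shadow are released.
 */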
int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
        int ret;

        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
                goto err_unpin;
        }

        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

        workload->req = i915_gem_request_get(rq);
        ret = copy_workload_to_ring_buffer(workload);
        if (ret)
                goto err_unpin;
        return 0;

err_unpin:
        engine->context_unpin(engine, shadow_ctx);
        release_shadow_wa_ctx(&workload->wa_ctx);
        return ret;
}

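/*
 * Scan and shadow the workload, run its prepare callback and, if a request
 * was generated, submit it to i915. Called by the workload thread with
 * gvt->lock held.
 */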
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        int ret = 0;

        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);

        mutex_lock(&dev_priv->drm.struct_mutex);

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret) {
                        engine->context_unpin(engine, shadow_ctx);
                        goto out;
                }
        }

out:
        if (ret)
                workload->status = ret;

        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                        ring_id, workload->req);
                i915_add_request(workload->req);
                workload->dispatched = true;
        }

        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}

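/*
 * Pick the next workload for a ring from the current vGPU's queue, or return
 * the still-pending current workload so it can be resubmitted. Returns NULL
 * if there is no current vGPU, a reschedule is pending, or the queue is
 * empty.
 */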
static struct intel_vgpu_workload *pick_next_workload(
                struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
                goto out;
        }

        if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;

        /*
         * Still have a current workload; the workload dispatcher may have
         * failed to submit it for some reason, so resubmit it.
         */
        if (scheduler->current_workload[ring_id]) {
                workload = scheduler->current_workload[ring_id];
                gvt_dbg_sched("ring id %d still have current workload %p\n",
                                ring_id, workload);
                goto out;
        }

        /*
         * Pick a workload as the current workload. Once the current workload
         * is set, the scheduling policy routines will wait until it is
         * finished before scheduling out a vgpu.
         */
        scheduler->current_workload[ring_id] = container_of(
                        workload_q_head(scheduler->current_vgpu, ring_id)->next,
                        struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[ring_id];

        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

        atomic_inc(&workload->vgpu->running_workload_num);
out:
        mutex_unlock(&gvt->lock);
        return workload;
}

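/*
 * Write the shadow context back into the guest context image: copy the
 * context pages to guest memory, update the ring header and mirror the
 * tracked ring context registers.
 */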
static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                        I915_GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                I915_GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
}

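/*
 * Finish the current workload on a ring: wait for the shadow context to be
 * scheduled out, propagate the request status, write the results back to
 * the guest, fire the pending virtual events and hand the workload back to
 * its owner via the complete() callback.
 */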
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
        struct intel_vgpu *vgpu;
        int event;

        mutex_lock(&gvt->lock);

        workload = scheduler->current_workload[ring_id];
        vgpu = workload->vgpu;

        /* For a workload with a request, wait for the context switch to make
         * sure the request has completed.
         * For a workload without a request, complete the workload directly.
         */
        if (workload->req) {
                struct drm_i915_private *dev_priv =
                        workload->vgpu->gvt->dev_priv;
                struct intel_engine_cs *engine =
                        dev_priv->engine[workload->ring_id];
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                /* If this request caused a GPU hang, req->fence.error will
                 * be set to -EIO. Propagate -EIO as the workload status so
                 * that a request which caused a GPU hang does not trigger a
                 * context switch interrupt to the guest.
                 */
                if (likely(workload->status == -EINPROGRESS)) {
                        if (workload->req->fence.error == -EIO)
                                workload->status = -EIO;
                        else
                                workload->status = 0;
                }

                i915_gem_request_put(fetch_and_zero(&workload->req));

                if (!workload->status && !(vgpu->resetting_eng &
                                           ENGINE_MASK(ring_id))) {
                        update_guest_context(workload);

                        for_each_set_bit(event, workload->pending_events,
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
                mutex_lock(&dev_priv->drm.struct_mutex);
                /* unpin shadow ctx as the shadow_ctx update is done */
                engine->context_unpin(engine, workload->vgpu->shadow_ctx);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                        ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        list_del_init(&workload->list);
        workload->complete(workload);

        atomic_dec(&vgpu->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);

        if (gvt->scheduler.need_reschedule)
                intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

        mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
        struct intel_gvt *gvt;
        int ring_id;
};

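/*
 * Per-ring worker thread: pick the next workload, dispatch it to i915 and
 * wait for it to finish, holding runtime PM and (on Skylake/Kaby Lake)
 * forcewake references around each dispatch.
 */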
static int workload_thread(void *priv)
{
        struct workload_thread_param *p = (struct workload_thread_param *)priv;
        struct intel_gvt *gvt = p->gvt;
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
                        || IS_KABYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        kfree(p);

        gvt_dbg_core("workload thread for ring %d started\n", ring_id);

        while (!kthread_should_stop()) {
                add_wait_queue(&scheduler->waitq[ring_id], &wait);
                do {
                        workload = pick_next_workload(gvt, ring_id);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[ring_id], &wait);

                if (!workload)
                        break;

                gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
                                workload->ring_id, workload,
                                workload->vgpu->id);

                intel_runtime_pm_get(gvt->dev_priv);

                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                workload->ring_id, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
                mutex_unlock(&gvt->lock);

                if (ret) {
                        vgpu = workload->vgpu;
                        gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }

                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);
                i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);

                complete_current_workload(gvt, ring_id);

                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                intel_runtime_pm_put(gvt->dev_priv);
        }
        return 0;
}

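/* Wait until all running workloads of a vGPU have completed. */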
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&vgpu->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                                !atomic_read(&vgpu->running_workload_num));
        }
}

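/*
 * Tear down the workload scheduler: unregister the context status notifiers
 * and stop the per-ring workload threads.
 */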
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;

        gvt_dbg_core("clean workload scheduler\n");

        for_each_engine(engine, gvt->dev_priv, i) {
                atomic_notifier_chain_unregister(
                                &engine->context_status_notifier,
                                &gvt->shadow_ctx_notifier_block[i]);
                kthread_stop(scheduler->thread[i]);
        }
}

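/*
 * Set up the workload scheduler: create a workload thread per engine and
 * register the shadow context status notifier on each engine.
 */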
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;
        int ret;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for_each_engine(engine, gvt->dev_priv, i) {
                init_waitqueue_head(&scheduler->waitq[i]);

                param = kzalloc(sizeof(*param), GFP_KERNEL);
                if (!param) {
                        ret = -ENOMEM;
                        goto err;
                }

                param->gvt = gvt;
                param->ring_id = i;

                scheduler->thread[i] = kthread_run(workload_thread, param,
                        "gvt workload %d", i);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }

                gvt->shadow_ctx_notifier_block[i].notifier_call =
                                        shadow_context_status_change;
                atomic_notifier_chain_register(&engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
        }
        return 0;
err:
        intel_gvt_clean_workload_scheduler(gvt);
        kfree(param);
        param = NULL;
        return ret;
}

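/* Release the per-vGPU shadow context. */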
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
        i915_gem_context_put(vgpu->shadow_ctx);
}

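/*
 * Create the per-vGPU shadow context used to submit guest workloads to i915,
 * raising its priority when logical ring preemption is available.
 */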
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
        atomic_set(&vgpu->running_workload_num, 0);

        vgpu->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
        if (IS_ERR(vgpu->shadow_ctx))
                return PTR_ERR(vgpu->shadow_ctx);

        if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
                vgpu->shadow_ctx->priority = INT_MAX;

        vgpu->shadow_ctx->engine[RCS].initialised = true;

        bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);

        return 0;
}