/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)
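
/*
 * Write the shadow PPGTT root pointers into a shadow ring context.
 * The context image stores the PDP MMIO pairs starting at pdp3_UDW
 * and running down to pdp0_LDW, so pdp[] is copied in reverse.
 */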
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}
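
/*
 * Populate the shadow context image from the guest: copy the guest
 * context pages through the hypervisor read interface, then patch in
 * the handful of registers GVT tracks explicitly and the shadow PDP
 * root pointers.
 */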
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
			workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
	return 0;
}
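
/*
 * GVT's shadow contexts are created with force-single-submission set,
 * so this flag is what tells GVT-generated requests apart from host
 * i915 requests in the context status notifier below.
 */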
static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
	return i915_gem_context_force_single_submission(req->ctx);
}
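
/*
 * Latch INSTDONE/ACTHD from the hardware into the vGPU's virtual
 * register file; called from the notifier on schedule-out and
 * preemption below.
 */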
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
	i915_reg_t reg;

	reg = RING_INSTDONE(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD_UDW(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}
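
/*
 * Notifier callback on i915's per-engine context status chain. For
 * host-owned requests it switches engine MMIO state back from a vGPU
 * to the host; for GVT requests it performs the vGPU MMIO switch on
 * schedule-in, saves ring state on schedule-out/preemption, and
 * tracks shadow context activity for complete_current_workload().
 */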
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;
	unsigned long flags;

	if (!is_gvt_request(req)) {
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		save_ring_hw_state(workload->vgpu, ring_id);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		save_ring_hw_state(workload->vgpu, ring_id);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}
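
/*
 * Refresh bits 0-11 of the cached context descriptor from
 * desc_template, which carries the vGPU's addressing-mode flags set
 * up in intel_gvt_scan_and_shadow_workload().
 */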
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
		struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc = 0;

	desc = ce->lrc_desc;

	/* Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ctx->desc_template & ((1ULL << 12) - 1);

	ce->lrc_desc = desc;
}
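
/*
 * Copy the scanned guest ring buffer contents into ring space
 * reserved on the shadow request, leaving
 * workload->shadow_ring_buffer_va pointing at the new copy.
 */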
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	void *shadow_ring_buffer_va;
	u32 *cs;

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
				workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}
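
/*
 * Drop the map and reference taken on the shadowed indirect context
 * object, if one was set up for this workload.
 */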
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning it and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_ring *ring;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->shadowed)
		return 0;

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(shadow_ctx,
					dev_priv->engine[ring_id]);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		goto err_scan;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_scan;
	}

	/* Pin the shadow context in GVT even though it will also be
	 * pinned when i915 allocates the request: GVT updates the guest
	 * context from the shadow context when the workload completes,
	 * and by then i915 may already have unpinned the shadow context,
	 * invalidating the shadow_ctx pages. So GVT holds its own pin
	 * and drops it once the guest context has been updated.
	 */
	ring = engine->context_pin(engine, shadow_ctx);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
		goto err_shadow;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto err_unpin;
	workload->shadowed = true;
	return 0;

err_unpin:
	engine->context_unpin(engine, shadow_ctx);
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
	return ret;
}
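
/*
 * Allocate the i915 request that will carry the shadowed workload and
 * copy the ring buffer into it. On failure, the shadow context pin
 * and wa_ctx taken by intel_gvt_scan_and_shadow_workload() are
 * released here.
 */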
int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct drm_i915_gem_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
	int ret;

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto err_unpin;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_gem_request_get(rq);
	ret = copy_workload_to_ring_buffer(workload);
	if (ret)
		goto err_unpin;
	return 0;

err_unpin:
	engine->context_unpin(engine, shadow_ctx);
	release_shadow_wa_ctx(&workload->wa_ctx);
	return ret;
}
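
/*
 * Shadow the workload and hand it to i915. Note that if a request was
 * created it is always submitted, even when an error has been flagged
 * in workload->status: once allocated, the request must be added.
 */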
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	int ret = 0;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	mutex_lock(&dev_priv->drm.struct_mutex);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret) {
			engine->context_unpin(engine, shadow_ctx);
			goto out;
		}
	}

out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
				ring_id, workload->req);
		i915_add_request(workload->req);
		workload->dispatched = true;
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}
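
/*
 * Select the next workload for a ring under gvt->lock. Returns NULL
 * when there is no current vgpu, a reschedule is pending, or the
 * vgpu's queue for this ring is empty; re-returns the current
 * workload if a previous dispatch did not finish submitting it.
 */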
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have current workload, maybe the workload dispatcher
	 * failed to submit it for some reason, resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as current workload
	 * once current workload is set, schedule policy routines
	 * will wait until the current workload is finished when trying
	 * to schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}
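
/*
 * Write-back counterpart of populate_shadow_context(): copy the
 * shadow context image, ring tail and tracked registers back into
 * the guest's context through the hypervisor write interface.
 */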
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}
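
/*
 * Retire the current workload on a ring: wait for the shadow context
 * to schedule out, propagate the request's fence error (if any) into
 * workload->status, write results back to the guest unless the engine
 * is being reset, and wake anyone waiting on workload_complete_wq.
 */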
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload;
	struct intel_vgpu *vgpu;
	int event;

	mutex_lock(&gvt->lock);

	workload = scheduler->current_workload[ring_id];
	vgpu = workload->vgpu;

	/* For a workload with a request, wait for the context switch so
	 * the request is known to be completed; a workload without a
	 * request can be completed directly.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Propagate -EIO into the workload status
		 * so that a hanging request does not trigger a context
		 * switch interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin shadow ctx as the shadow_ctx update is done */
		engine->context_unpin(engine, workload->vgpu->shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);
	workload->complete(workload);

	atomic_dec(&vgpu->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->lock);
}
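
/*
 * One workload thread runs per engine; each repeatedly picks the next
 * workload for its ring, dispatches it to i915 and waits for both the
 * request and the context-switch notification before completing it.
 */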
struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);
	}
	return 0;
}
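
/*
 * Block until every in-flight workload of this vGPU has been
 * completed by the workload threads.
 */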
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&vgpu->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&vgpu->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
				&engine->context_status_notifier,
				&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}
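
/*
 * Start one workload thread per engine and register the shadow
 * context status notifier on each. intel_gvt_clean_workload_scheduler()
 * is the inverse and also serves as the unwind path when thread
 * creation fails partway through.
 */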
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
				"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
	i915_gem_context_put(vgpu->shadow_ctx);
}
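
/*
 * Create the per-vGPU shadow context used to submit guest workloads
 * to i915. Note: the INT_MAX priority set below presumably keeps
 * shadowed submissions from being preempted by host work when
 * logical ring preemption is available.
 */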
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
	atomic_set(&vgpu->running_workload_num, 0);

	vgpu->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(vgpu->shadow_ctx))
		return PTR_ERR(vgpu->shadow_ctx);

	if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
		vgpu->shadow_ctx->priority = INT_MAX;

	vgpu->shadow_ctx->engine[RCS].initialised = true;

	bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	return 0;
}