/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

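/*
 * Write the PPGTT page-directory pointers into a ring context image. The
 * context stores the pairs highest-first (pdp3..pdp0), so the pdp[] array
 * is copied in reverse order.
 */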
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}

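/*
 * Copy the guest ring context into the shadow context object: the guest
 * context pages are read from guest memory via the hypervisor interface,
 * selected registers are copied into the shadow LRC state page, and the
 * PDP root pointers are replaced with the shadow page table roots.
 */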
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = intel_lr_context_size(
			gvt->dev_priv->engine[ring_id]);

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_err("invalid guest context descriptor\n");
			return -EINVAL;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		dst = kmap_atomic(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				GTT_PAGE_SIZE);
		kunmap_atomic(dst);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap_atomic(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
				     workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap_atomic(shadow_ring_context);
	return 0;
}

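/*
 * Notifier called by i915 when the shadow context is scheduled in or out
 * on the hardware: switch the render MMIO state for the owning vGPU and
 * track shadow_ctx_active so that workload completion can wait for the
 * context to actually leave the hardware.
 */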
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
			struct intel_vgpu, shadow_ctx_notifier_block);
	struct drm_i915_gem_request *req =
		(struct drm_i915_gem_request *)data;
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[req->engine->id];

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		intel_gvt_load_render_mmio(workload->vgpu,
					   workload->ring_id);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		intel_gvt_restore_render_mmio(workload->vgpu,
					      workload->ring_id);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

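/*
 * Dispatch one vGPU workload to i915: allocate a request on the shadow
 * context, scan and shadow the guest ring buffer and wa_ctx, populate the
 * shadow context from the guest, run the optional prepare() callback and
 * submit the request. On the error path the already-allocated request is
 * still added before returning.
 */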
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct drm_i915_gem_request *rq;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_err("fail to allocate gem request\n");
		workload->status = PTR_ERR(rq);
		return workload->status;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_gem_request_get(rq);

	mutex_lock(&gvt->lock);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto err;

	ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
	if (ret)
		goto err;

	ret = populate_shadow_context(workload);
	if (ret)
		goto err;

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto err;
	}

	mutex_unlock(&gvt->lock);

	gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
		ring_id, workload->req);

	i915_add_request_no_flush(rq);
	workload->dispatched = true;
	return 0;
err:
	workload->status = ret;

	mutex_unlock(&gvt->lock);

	i915_add_request_no_flush(rq);
	return ret;
}

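/*
 * Pick the next workload to run on a ring: return the current workload if
 * one is still pending, otherwise take the head of the current vGPU's
 * workload queue and make it the current workload for this ring. Returns
 * NULL when there is nothing to run (no current vGPU, a reschedule is
 * pending, or the queue is empty).
 */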
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * no current vgpu / will be scheduled out / no workload:
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
		gvt_dbg_sched("ring id %d stop - no available workload\n",
				ring_id);
		goto out;
	}

	/*
	 * still have a current workload: the workload dispatcher may have
	 * failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload. Once the current workload
	 * is set, the scheduling policy routines will wait until it is
	 * finished before trying to schedule out a vGPU.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}

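/*
 * Write the shadow context back to the guest after a workload completes:
 * copy the shadow context pages into guest memory, write the workload's
 * ring tail into the guest ring header, and copy back the tracked registers
 * from the shadow LRC state page.
 */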
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = intel_lr_context_size(
			gvt->dev_priv->engine[ring_id]);

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		src = kmap_atomic(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				GTT_PAGE_SIZE);
		kunmap_atomic(src);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap_atomic(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap_atomic(shadow_ring_context);
}

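/*
 * Finish the current workload on a ring: on success (and when the vGPU is
 * not being reset), wait for the shadow context to be scheduled out of the
 * hardware, write the result back to the guest and fire the pending virtual
 * events; then clear the current workload slot and call the workload's
 * complete() callback.
 */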
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload;
	int event;

	mutex_lock(&gvt->lock);

	workload = scheduler->current_workload[ring_id];

	if (!workload->status && !workload->vgpu->resetting) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		update_guest_context(workload);

		for_each_set_bit(event, workload->pending_events,
				 INTEL_GVT_EVENT_MAX)
			intel_vgpu_trigger_virtual_event(workload->vgpu,
					event);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	atomic_dec(&workload->vgpu->running_workload_num);

	list_del_init(&workload->list);
	workload->complete(workload);

	wake_up(&scheduler->workload_complete_wq);
	mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

static DEFINE_MUTEX(scheduler_mutex);

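/*
 * Per-ring scheduler thread: wait for a workload to appear on the current
 * vGPU's queue, dispatch it to i915 under struct_mutex, wait for the
 * request to complete, and then complete the workload. On Skylake,
 * forcewake is held across dispatch and completion.
 */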
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		mutex_lock(&scheduler_mutex);

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->dev_priv->drm.struct_mutex);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->dev_priv->drm.struct_mutex);

		if (ret) {
			gvt_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);

		workload->status = i915_wait_request(workload->req,
						     0, NULL, NULL);
		if (workload->status != 0)
			gvt_err("fail to wait workload, skip\n");

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		mutex_lock(&gvt->dev_priv->drm.struct_mutex);
		complete_current_workload(gvt, ring_id);
		mutex_unlock(&gvt->dev_priv->drm.struct_mutex);

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);

		mutex_unlock(&scheduler_mutex);
	}
	return 0;
}

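/*
 * Wait until all workloads submitted by a vGPU have been completed by the
 * scheduler threads.
 */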
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&vgpu->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&vgpu->running_workload_num));
	}
}

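/*
 * Stop all per-ring workload threads when the GVT device is torn down.
 */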
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	int i;

	gvt_dbg_core("clean workload scheduler\n");

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (scheduler->thread[i]) {
			kthread_stop(scheduler->thread[i]);
			scheduler->thread[i] = NULL;
		}
	}
}

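/*
 * Create one workload thread and wait queue per available engine. On any
 * failure, already-created threads are stopped again.
 */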
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	int ret;
	int i;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		/* check ring mask at init time */
		if (!HAS_ENGINE(gvt->dev_priv, i))
			continue;

		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

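/*
 * Unregister the shadow context status notifier and release the vGPU's
 * shadow context.
 */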
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
			&vgpu->shadow_ctx_notifier_block);

	mutex_lock(&dev_priv->drm.struct_mutex);

	/* a little hacky to mark as ctx closed */
	vgpu->shadow_ctx->closed = true;
	i915_gem_context_put(vgpu->shadow_ctx);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

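/*
 * Create the per-vGPU shadow i915 context and register the status notifier
 * used to track when it is scheduled in and out of the hardware.
 */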
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
	atomic_set(&vgpu->running_workload_num, 0);

	vgpu->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(vgpu->shadow_ctx))
		return PTR_ERR(vgpu->shadow_ctx);

	vgpu->shadow_ctx->engine[RCS].initialised = true;

	vgpu->shadow_ctx_notifier_block.notifier_call =
		shadow_context_status_change;

	atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
				       &vgpu->shadow_ctx_notifier_block);
	return 0;
}