/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)
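
/*
 * Write the shadow PPGTT root pointers into the ring context image.
 * pdp_pair starts at pdp3_UDW, the highest-numbered PDP register, so
 * the source array is copied in reverse: pdp[7 - i] -> pdp_pair[i].
 */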
static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;

        for (i = 0; i < 8; i++)
                pdp_pair[i].val = pdp[7 - i];
}
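
/*
 * Copy the guest ring context into the shadow context that is actually
 * submitted to the HW: the per-context pages, selected ring-context
 * registers, and the PDP root pointers (replaced with the shadow page
 * table roots).
 */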
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = intel_lr_context_size(
                        gvt->dev_priv->engine[ring_id]);

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_err("Invalid guest context descriptor\n");
                        return -EINVAL;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

        if (ring_id == RCS) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG

        set_context_pdp_root_pointer(shadow_ring_context,
                        workload->shadow_mm->shadow_page_table);

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
        return 0;
}
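
/*
 * i915 context status notifier. Loads/restores render MMIO state as the
 * shadow context is scheduled in/out on the HW and flips
 * shadow_ctx_active, which complete_current_workload() waits on.
 */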
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct intel_vgpu *vgpu = container_of(nb,
                        struct intel_vgpu, shadow_ctx_notifier_block);
        struct drm_i915_gem_request *req =
                (struct drm_i915_gem_request *)data;
        struct intel_gvt_workload_scheduler *scheduler =
                &vgpu->gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[req->engine->id];

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                intel_gvt_load_render_mmio(workload->vgpu,
                                           workload->ring_id);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
                intel_gvt_restore_render_mmio(workload->vgpu,
                                              workload->ring_id);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }
        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}
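
/*
 * Dispatch one workload to i915: allocate a request on the target engine
 * with the vGPU's shadow context, scan and shadow the guest ring buffer
 * and wa_ctx, populate the shadow context image, run the per-workload
 * prepare() hook and submit the request.
 */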
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct drm_i915_gem_request *rq;
        int ret;

        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);

        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                     GEN8_CTX_ADDRESSING_MODE_SHIFT;

        mutex_lock(&dev_priv->drm.struct_mutex);

        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
                goto out;
        }

        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

        workload->req = i915_gem_request_get(rq);

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
        if (ret)
                goto out;

        ret = populate_shadow_context(workload);
        if (ret)
                goto out;

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret)
                        goto out;
        }

        gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                ring_id, workload->req);

        ret = 0;
        workload->dispatched = true;
out:
        if (ret)
                workload->status = ret;

        if (!IS_ERR_OR_NULL(rq))
                i915_add_request_no_flush(rq);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}
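
/*
 * Pick the next workload for a ring from the current vgpu's queue and
 * make it the ring's current workload. Returns NULL when the ring should
 * idle: no current vgpu, a reschedule is pending, or the queue is empty.
 */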
static struct intel_vgpu_workload *pick_next_workload(
                struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
                goto out;
        }

        if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
                gvt_dbg_sched("ring id %d stop - no available workload\n",
                                ring_id);
                goto out;
        }

        /*
         * still have current workload, maybe the workload dispatcher
         * failed to submit it for some reason, resubmit it.
         */
        if (scheduler->current_workload[ring_id]) {
                workload = scheduler->current_workload[ring_id];
                gvt_dbg_sched("ring id %d still have current workload %p\n",
                                ring_id, workload);
                goto out;
        }

        /*
         * pick a workload as current workload
         * once current workload is set, schedule policy routines
         * will wait until the current workload is finished when trying to
         * schedule out a vgpu.
         */
        scheduler->current_workload[ring_id] = container_of(
                        workload_q_head(scheduler->current_vgpu, ring_id)->next,
                        struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[ring_id];

        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

        atomic_inc(&workload->vgpu->running_workload_num);
out:
        mutex_unlock(&gvt->lock);
        return workload;
}
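
/*
 * Completion-path mirror of populate_shadow_context(): copy the shadow
 * context pages back into guest memory, write the final rb_tail into the
 * guest ring_header, and write back selected ring-context registers.
 */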
static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = intel_lr_context_size(
                        gvt->dev_priv->engine[ring_id]);

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_err("invalid guest context descriptor\n");
                        return;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
}
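
/*
 * Retire the ring's current workload: unless it failed or the vgpu is
 * resetting, wait for the shadow context to be scheduled out, copy the
 * results back to the guest and deliver pending events; then hand the
 * workload to its complete() callback.
 */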
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
        struct intel_vgpu *vgpu;
        int event;

        mutex_lock(&gvt->lock);

        workload = scheduler->current_workload[ring_id];
        vgpu = workload->vgpu;

        if (!workload->status && !vgpu->resetting) {
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                update_guest_context(workload);

                for_each_set_bit(event, workload->pending_events,
                                 INTEL_GVT_EVENT_MAX)
                        intel_vgpu_trigger_virtual_event(vgpu, event);
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                        ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        list_del_init(&workload->list);
        workload->complete(workload);

        atomic_dec(&vgpu->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);
        mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
        struct intel_gvt *gvt;
        int ring_id;
};

static DEFINE_MUTEX(scheduler_mutex);
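
/*
 * Per-ring scheduler thread: sleep until pick_next_workload() returns a
 * workload, dispatch it, wait for its request and retire it. Forcewake is
 * held from dispatch to completion on Skylake, and scheduler_mutex
 * serializes the dispatch sections of the per-ring threads.
 */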
static int workload_thread(void *priv)
{
        struct workload_thread_param *p = (struct workload_thread_param *)priv;
        struct intel_gvt *gvt = p->gvt;
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        long lret;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        kfree(p);

        gvt_dbg_core("workload thread for ring %d started\n", ring_id);

        while (!kthread_should_stop()) {
                add_wait_queue(&scheduler->waitq[ring_id], &wait);
                do {
                        workload = pick_next_workload(gvt, ring_id);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[ring_id], &wait);

                if (!workload)
                        break;

                mutex_lock(&scheduler_mutex);

                gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
                                workload->ring_id, workload,
                                workload->vgpu->id);

                intel_runtime_pm_get(gvt->dev_priv);

                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                workload->ring_id, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
                mutex_unlock(&gvt->lock);

                if (ret) {
                        gvt_err("fail to dispatch workload, skip\n");
                        goto complete;
                }

                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);

                lret = i915_wait_request(workload->req,
                                0, MAX_SCHEDULE_TIMEOUT);
                if (lret < 0) {
                        workload->status = lret;
                        gvt_err("fail to wait workload, skip\n");
                } else {
                        workload->status = 0;
                }

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);

                if (workload->req)
                        i915_gem_request_put(fetch_and_zero(&workload->req));

                complete_current_workload(gvt, ring_id);

                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                intel_runtime_pm_put(gvt->dev_priv);

                mutex_unlock(&scheduler_mutex);
        }
        return 0;
}
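
/*
 * Block until every workload submitted by this vgpu has been retired.
 */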
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&vgpu->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                                !atomic_read(&vgpu->running_workload_num));
        }
}
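
/*
 * Stop and forget the per-ring workload threads.
 */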
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        int i;

        gvt_dbg_core("clean workload scheduler\n");

        for (i = 0; i < I915_NUM_ENGINES; i++) {
                if (scheduler->thread[i]) {
                        kthread_stop(scheduler->thread[i]);
                        scheduler->thread[i] = NULL;
                }
        }
}
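
/*
 * Create one workload thread per present engine; on failure, stop any
 * threads that were already started.
 */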
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
        int ret;
        int i;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for (i = 0; i < I915_NUM_ENGINES; i++) {
                /* check ring mask at init time */
                if (!HAS_ENGINE(gvt->dev_priv, i))
                        continue;

                init_waitqueue_head(&scheduler->waitq[i]);

                param = kzalloc(sizeof(*param), GFP_KERNEL);
                if (!param) {
                        ret = -ENOMEM;
                        goto err;
                }

                param->gvt = gvt;
                param->ring_id = i;

                scheduler->thread[i] = kthread_run(workload_thread, param,
                                "gvt workload %d", i);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }
        }
        return 0;
err:
        intel_gvt_clean_workload_scheduler(gvt);
        kfree(param);
        param = NULL;
        return ret;
}
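
/*
 * Unregister the context status notifier and drop the vgpu's shadow
 * context.
 */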
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
        atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
                        &vgpu->shadow_ctx_notifier_block);

        i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}
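
/*
 * Create the i915 shadow context for a vgpu and register the context
 * status notifier so the scheduler sees schedule-in/out events.
 */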
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
        atomic_set(&vgpu->running_workload_num, 0);

        vgpu->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
        if (IS_ERR(vgpu->shadow_ctx))
                return PTR_ERR(vgpu->shadow_ctx);

        vgpu->shadow_ctx->engine[RCS].initialised = true;

        vgpu->shadow_ctx_notifier_block.notifier_call =
                shadow_context_status_change;

        atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
                                       &vgpu->shadow_ctx_notifier_block);
        return 0;
}