/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

#define _EL_OFFSET_STATUS	0x234
#define _EL_OFFSET_STATUS_BUF	0x370
#define _EL_OFFSET_STATUS_PTR	0x3A0

#define execlist_ring_mmio(gvt, ring_id, offset) \
	(gvt->dev_priv->engine[ring_id]->mmio_base + (offset))

#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))
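
/*
 * For illustration: execlist_ring_mmio(gvt, RCS, _EL_OFFSET_STATUS) resolves
 * to the RCS ExecList status register (the engine's mmio_base + 0x234).
 * same_context() deliberately requires both the context ID and the LRCA to
 * match; neither field alone uniquely identifies a guest context here.
 */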

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);

static int context_switch_events[] = {
	[RCS] = RCS_AS_CONTEXT_SWITCH,
	[BCS] = BCS_AS_CONTEXT_SWITCH,
	[VCS] = VCS_AS_CONTEXT_SWITCH,
	[VCS2] = VCS2_AS_CONTEXT_SWITCH,
	[VECS] = VECS_AS_CONTEXT_SWITCH,
};

static int ring_id_to_context_switch_event(int ring_id)
{
	if (WARN_ON(ring_id < RCS ||
		    ring_id >= ARRAY_SIZE(context_switch_events)))
		return -EINVAL;

	return context_switch_events[ring_id];
}

static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
{
	gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
			execlist->running_slot ?
			execlist->running_slot->index : -1,
			execlist->running_context ?
			execlist->running_context->context_id : 0,
			execlist->pending_slot ?
			execlist->pending_slot->index : -1);

	execlist->running_slot = execlist->pending_slot;
	execlist->pending_slot = NULL;
	execlist->running_context = execlist->running_context ?
		&execlist->running_slot->ctx[0] : NULL;

	gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
			execlist->running_slot ?
			execlist->running_slot->index : -1,
			execlist->running_context ?
			execlist->running_context->context_id : 0,
			execlist->pending_slot ?
			execlist->pending_slot->index : -1);
}
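
/*
 * A sketch of the transition performed above, assuming the two-slot virtual
 * execlist (slot[0]/slot[1]):
 *
 *   before: running = slot A (possibly mid-flight), pending = slot B
 *   after:  running = slot B, pending = NULL,
 *           running_context = &slot B->ctx[0] (only if a context was running)
 */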

static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *desc = execlist->running_context;
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct execlist_status_format status;
	int ring_id = execlist->ring_id;
	u32 status_reg = execlist_ring_mmio(vgpu->gvt,
			ring_id, _EL_OFFSET_STATUS);

	status.ldw = vgpu_vreg(vgpu, status_reg);
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (running) {
		status.current_execlist_pointer = !!running->index;
		status.execlist_write_pointer = !running->index;
		status.execlist_0_active = status.execlist_0_valid =
			!running->index;
		status.execlist_1_active = status.execlist_1_valid =
			!!running->index;
	} else {
		status.context_id = 0;
		status.execlist_0_active = status.execlist_0_valid = 0;
		status.execlist_1_active = status.execlist_1_valid = 0;
	}

	status.context_id = desc ? desc->context_id : 0;
	status.execlist_queue_full = !!(pending);

	vgpu_vreg(vgpu, status_reg) = status.ldw;
	vgpu_vreg(vgpu, status_reg + 4) = status.udw;

	gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
		vgpu->id, status_reg, status.ldw, status.udw);
}
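
/*
 * Illustration (assuming running->index is 0 or 1): with slot 0 running, the
 * status register reports current pointer = 0, write pointer = 1, and
 * execlist 0 as active/valid; with slot 1 running, the bits flip. A pending
 * slot simply marks the queue as full.
 */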

static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
		struct execlist_context_status_format *status,
		bool trigger_interrupt_later)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	int ring_id = execlist->ring_id;
	struct execlist_context_status_pointer_format ctx_status_ptr;
	u32 write_pointer;
	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;

	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_PTR);
	ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_BUF);

	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);

	write_pointer = ctx_status_ptr.write_ptr;

	if (write_pointer == 0x7)
		write_pointer = 0;
	else {
		++write_pointer;
		write_pointer %= 0x6;
	}

	offset = ctx_status_buf_reg + write_pointer * 8;

	vgpu_vreg(vgpu, offset) = status->ldw;
	vgpu_vreg(vgpu, offset + 4) = status->udw;

	ctx_status_ptr.write_ptr = write_pointer;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;

	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
		vgpu->id, write_pointer, offset, status->ldw, status->udw);

	if (trigger_interrupt_later)
		return;

	intel_vgpu_trigger_virtual_event(vgpu,
			ring_id_to_context_switch_event(execlist->ring_id));
}
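
/*
 * Worked example of the write-pointer arithmetic above: 0x7 is the reset
 * value (see init_vgpu_execlist()), so the first CSB entry after reset lands
 * at index 0. Afterwards the pointer advances 0 -> 1 -> ... -> 5 -> 0, i.e.
 * each status dword pair is written at ctx_status_buf_reg + index * 8 within
 * a six-entry buffer.
 */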

static int emulate_execlist_ctx_schedule_out(
		struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format *ctx)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
	struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
	struct execlist_context_status_format status;

	memset(&status, 0, sizeof(status));

	gvt_dbg_el("schedule out context id %x\n", ctx->context_id);

	if (WARN_ON(!same_context(ctx, execlist->running_context))) {
		gvt_vgpu_err("schedule out context is not running context, "
				"ctx id %x running ctx id %x\n",
				ctx->context_id,
				execlist->running_context->context_id);
		return -EINVAL;
	}

	/* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
	if (valid_context(ctx1) && same_context(ctx0, ctx)) {
		gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");

		execlist->running_context = ctx1;

		emulate_execlist_status(execlist);

		status.context_complete = status.element_switch = 1;
		status.context_id = ctx->context_id;

		emulate_csb_update(execlist, &status, false);
	/*
	 * ctx1 is not valid, ctx == ctx0
	 * ctx1 is valid, ctx1 == ctx
	 *	--> last element is finished
	 * emulate:
	 *	active-to-idle if there is *no* pending execlist
	 *	context-complete if there *is* pending execlist
	 */
	} else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
			|| (valid_context(ctx1) && same_context(ctx1, ctx))) {
		gvt_dbg_el("need to switch virtual execlist slot\n");

		switch_virtual_execlist_slot(execlist);

		emulate_execlist_status(execlist);

		status.context_complete = status.active_to_idle = 1;
		status.context_id = ctx->context_id;

		if (!pending) {
			emulate_csb_update(execlist, &status, false);
		} else {
			emulate_csb_update(execlist, &status, true);

			memset(&status, 0, sizeof(status));

			status.idle_to_active = 1;
			status.context_id = 0;

			emulate_csb_update(execlist, &status, false);
		}
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
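
/*
 * Summary of the cases handled above, mirroring the hardware CSB events: an
 * element switch when ctx[0] completes while ctx[1] of the same slot is
 * still valid; context-complete plus active-to-idle when the last element
 * finishes with nothing pending; and, with a pending slot, that same event
 * followed by an idle-to-active event as the pending slot takes over.
 */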

static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
		struct intel_vgpu_execlist *execlist)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	int ring_id = execlist->ring_id;
	u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS);
	struct execlist_status_format status;

	status.ldw = vgpu_vreg(vgpu, status_reg);
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (status.execlist_queue_full) {
		gvt_vgpu_err("virtual execlist slots are full\n");
		return NULL;
	}

	return &execlist->slot[status.execlist_write_pointer];
}

static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format ctx[2])
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *slot =
		get_next_execlist_slot(execlist);

	struct execlist_ctx_descriptor_format *ctx0, *ctx1;
	struct execlist_context_status_format status;
	struct intel_vgpu *vgpu = execlist->vgpu;

	gvt_dbg_el("emulate schedule-in\n");

	if (!slot) {
		gvt_vgpu_err("no available execlist slot\n");
		return -EINVAL;
	}

	memset(&status, 0, sizeof(status));
	memset(slot->ctx, 0, sizeof(slot->ctx));

	slot->ctx[0] = ctx[0];
	slot->ctx[1] = ctx[1];

	gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
			slot->index, ctx[0].context_id,
			ctx[1].context_id);

	/*
	 * no running execlist: make this write bundle the running execlist
	 * -> idle-to-active
	 */
	if (!running) {
		gvt_dbg_el("no current running execlist\n");

		execlist->running_slot = slot;
		execlist->pending_slot = NULL;
		execlist->running_context = &slot->ctx[0];

		gvt_dbg_el("running slot index %d running context %x\n",
				execlist->running_slot->index,
				execlist->running_context->context_id);

		emulate_execlist_status(execlist);

		status.idle_to_active = 1;
		status.context_id = 0;

		emulate_csb_update(execlist, &status, false);
		return 0;
	}

	ctx0 = &running->ctx[0];
	ctx1 = &running->ctx[1];

	gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
		running->index, ctx0->context_id, ctx1->context_id);

	/*
	 * there is already a running execlist
	 * a. running ctx1 is valid,
	 *    ctx0 is finished, and running ctx1 == new execlist ctx[0]
	 * b. running ctx1 is not valid,
	 *    ctx0 == new execlist ctx[0]
	 * ----> lite-restore + preempted
	 */
	if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
		/* condition a */
		(!same_context(ctx0, execlist->running_context))) ||
			(!valid_context(ctx1) &&
			 same_context(ctx0, &slot->ctx[0]))) { /* condition b */
		gvt_dbg_el("need to switch virtual execlist slot\n");

		execlist->pending_slot = slot;
		switch_virtual_execlist_slot(execlist);

		emulate_execlist_status(execlist);

		status.lite_restore = status.preempted = 1;
		status.context_id = ctx[0].context_id;

		emulate_csb_update(execlist, &status, false);
	} else {
		gvt_dbg_el("emulate as pending slot\n");
		/*
		 * otherwise
		 * --> emulate a pending execlist without preemption
		 */
		execlist->pending_slot = slot;
		emulate_execlist_status(execlist);
	}
	return 0;
}

static void free_workload(struct intel_vgpu_workload *workload)
{
	intel_vgpu_unpin_mm(workload->shadow_mm);
	intel_gvt_mm_unreference(workload->shadow_mm);
	kmem_cache_free(workload->vgpu->workloads, workload);
}

#define get_desc_from_elsp_dwords(ed, i) \
	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
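
/*
 * Note on ELSP dword ordering, assuming the guest follows the hardware's
 * write sequence (element 1 first, element 0 last): data[0..1] hold
 * element 1 and data[2..3] hold element 0. Callers therefore fetch
 * descriptor 0 with index 1 and descriptor 1 with index 0 (see
 * intel_vgpu_submit_execlist() below).
 */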

static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_shadow_bb_entry *entry_obj;

	/* pin the gem object to GGTT */
	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
		struct i915_vma *vma;

		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
		if (IS_ERR(vma))
			return;

		/* FIXME: we are not tracking our pinned VMA leaving it
		 * up to the core to fix up the stray pin_count upon
		 * free.
		 */

		/* update the relocated gma with the shadow batch buffer address */
		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
		if (gmadr_bytes == 8)
			entry_obj->bb_start_cmd_va[2] = 0;
	}
}

static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
			struct intel_vgpu_workload,
			wa_ctx);
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap_atomic(page);

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

	kunmap_atomic(shadow_ring_context);
	return 0;
}

static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	if (wa_ctx->indirect_ctx.size == 0)
		return;

	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma))
		return;

	/* FIXME: we are not tracking our pinned VMA leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
}
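
/*
 * Layout assumed by the per_ctx_va arithmetic above: the per-context
 * workaround pointer sits in the cacheline immediately following the shadow
 * indirect context image, with its second dword carrying the guest graphics
 * memory address that becomes per_ctx.shadow_gma before the line is zeroed.
 */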

static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct execlist_ctx_descriptor_format ctx[2];
	int ring_id = workload->ring_id;

	intel_vgpu_pin_mm(workload->shadow_mm);
	intel_vgpu_sync_oos_pages(workload->vgpu);
	intel_vgpu_flush_post_shadow(workload->vgpu);
	prepare_shadow_batch_buffer(workload);
	prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (!workload->emulate_schedule_in)
		return 0;

	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);

	return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	/* release all the shadow batch buffers */
	if (!list_empty(&workload->shadow_bb)) {
		struct intel_shadow_bb_entry *entry_obj =
			list_first_entry(&workload->shadow_bb,
					 struct intel_shadow_bb_entry,
					 list);
		struct intel_shadow_bb_entry *temp;

		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
					 list) {
			i915_gem_object_unpin_map(entry_obj->obj);
			i915_gem_object_put(entry_obj->obj);
			list_del(&entry_obj->list);
			kfree(entry_obj);
		}
	}
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ring_id = workload->ring_id;
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct intel_vgpu_workload *next_workload;
	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
	bool lite_restore = false;
	int ret;

	gvt_dbg_el("complete workload %p status %d\n", workload,
			workload->status);

	release_shadow_batch_buffer(workload);
	release_shadow_wa_ctx(&workload->wa_ctx);

	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
		/* if workload->status is not successful, the HW GPU has
		 * hung or something is wrong with i915/GVT, and GVT won't
		 * inject a context switch interrupt to the guest. So this
		 * error is effectively a vGPU hang from the guest's point
		 * of view, and we should emulate it as one. If there are
		 * pending workloads already submitted by the guest, we
		 * should clean them up the way the HW GPU does.
		 *
		 * if it is in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up during the reset process later, so doing the
		 * workload clean-up here doesn't have any impact.
		 */
		clean_workloads(vgpu, ENGINE_MASK(ring_id));
		goto out;
	}

	if (!list_empty(workload_q_head(vgpu, ring_id))) {
		struct execlist_ctx_descriptor_format *this_desc, *next_desc;

		next_workload = container_of(next,
				struct intel_vgpu_workload, list);
		this_desc = &workload->ctx_desc;
		next_desc = &next_workload->ctx_desc;

		lite_restore = same_context(this_desc, next_desc);
	}

	if (lite_restore) {
		gvt_dbg_el("next context == current - no schedule-out\n");
		free_workload(workload);
		return 0;
	}

	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
	if (ret)
		goto err;
out:
	free_workload(workload);
	return 0;
err:
	free_workload(workload);
	return ret;
}

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}
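
/*
 * Ordering note for read_guest_pdps(): the ring context stores the PDPs from
 * PDP3 downwards (pdp3_UDW first), while callers expect pdp[0..7] in
 * ascending order, so storing into pdp[7 - i] performs the reversal. Each
 * read is 4 bytes, stepping 8 bytes because each saved context register
 * occupies an (offset, value) dword pair.
 */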

static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	int page_table_level;
	u32 pdp[8];

	if (desc->addressing_mode == 1) { /* legacy 32-bit */
		page_table_level = 3;
	} else if (desc->addressing_mode == 3) { /* legacy 64-bit */
		page_table_level = 4;
	} else {
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);

	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {
		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_vgpu_err("failed to create mm object.\n");
			return PTR_ERR(mm);
		}
	}
	workload->shadow_mm = mm;
	return 0;
}

#define get_last_workload(q) \
	(list_empty(q) ? NULL : container_of(q->prev, \
	struct intel_vgpu_workload, list))

static int submit_context(struct intel_vgpu *vgpu, int ring_id,
		struct execlist_ctx_descriptor_format *desc,
		bool emulate_schedule_in)
{
	struct list_head *q = workload_q_head(vgpu, ring_id);
	struct intel_vgpu_workload *last_workload = get_last_workload(q);
	struct intel_vgpu_workload *workload = NULL;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return -EINVAL;
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
		gvt_dbg_el("ctx head %x real head %lx\n", head,
				last_workload->rb_tail);
		/*
		 * cannot use guest context head pointer here,
		 * as it might not be updated at this time
		 */
		head = last_workload->rb_tail;
	}

	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

	workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
	if (!workload)
		return -ENOMEM;

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->vgpu = vgpu;
	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;
	workload->prepare = prepare_execlist_workload;
	workload->complete = complete_execlist_workload;
	workload->status = -EINPROGRESS;
	workload->emulate_schedule_in = emulate_schedule_in;

	if (ring_id == RCS) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;
		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;

		WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
	}

	if (emulate_schedule_in)
		workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;

	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
			workload, ring_id, head, tail, start, ctl);

	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
			emulate_schedule_in);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(vgpu->workloads, workload);
		return ret;
	}

	queue_workload(workload);
	return 0;
}

int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct execlist_ctx_descriptor_format desc[2];
	int i, ret;

	desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
	desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);

	if (!desc[0].valid) {
		gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
		goto inv_desc;
	}

	for (i = 0; i < ARRAY_SIZE(desc); i++) {
		if (!desc[i].valid)
			continue;
		if (!desc[i].privilege_access) {
			gvt_vgpu_err("unexpected GGTT elsp submission\n");
			goto inv_desc;
		}
	}

	/* submit workload */
	for (i = 0; i < ARRAY_SIZE(desc); i++) {
		if (!desc[i].valid)
			continue;
		ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
		if (ret) {
			gvt_vgpu_err("failed to submit desc %d\n", i);
			return ret;
		}
	}

	return 0;

inv_desc:
	gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
		     desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
	return -EINVAL;
}
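
/*
 * Note on the i == 0 argument above: only the first descriptor of an ELSP
 * write carries emulate_schedule_in = true, so the virtual schedule-in (and
 * its CSB events) is emulated once per submission rather than once per
 * context in the pair.
 */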

static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct execlist_context_status_pointer_format ctx_status_ptr;
	u32 ctx_status_ptr_reg;

	memset(execlist, 0, sizeof(*execlist));

	execlist->vgpu = vgpu;
	execlist->ring_id = ring_id;
	execlist->slot[0].index = 0;
	execlist->slot[1].index = 1;

	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_PTR);

	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
	ctx_status_ptr.read_ptr = 0;
	ctx_status_ptr.write_ptr = 0x7;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
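
/*
 * The 0x7 written above matches the reset value of the CSB write pointer;
 * emulate_csb_update() treats it specially so that the first status entry
 * after reset is written at index 0.
 */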

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	unsigned int tmp;

	/* free the unsubmitted workloads in the queues */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
			&vgpu->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			free_workload(pos);
		}
	}
}

void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
	clean_workloads(vgpu, ALL_ENGINES);
	kmem_cache_destroy(vgpu->workloads);
}

int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	/* each ring has a virtual execlist engine */
	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		init_vgpu_execlist(vgpu, i);
		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
	}

	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
			sizeof(struct intel_vgpu_workload), 0,
			SLAB_HWCACHE_ALIGN,
			NULL);

	if (!vgpu->workloads)
		return -ENOMEM;

	return 0;
}

void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
		unsigned long engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	clean_workloads(vgpu, engine_mask);
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		init_vgpu_execlist(vgpu, engine->id);
}