/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

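/*
 * Per-engine execlist register offsets, relative to each engine's MMIO
 * base: the EXECLIST_STATUS register, the context status buffer (CSB)
 * and the CSB read/write pointer register.
 */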
#define _EL_OFFSET_STATUS	0x234
#define _EL_OFFSET_STATUS_BUF	0x370
#define _EL_OFFSET_STATUS_PTR	0x3A0

#define execlist_ring_mmio(gvt, ring_id, offset) \
	(gvt->dev_priv->engine[ring_id].mmio_base + (offset))

#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

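/* Virtual interrupt event raised for a context switch, indexed by ring id. */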
static int context_switch_events[] = {
	[RCS] = RCS_AS_CONTEXT_SWITCH,
	[BCS] = BCS_AS_CONTEXT_SWITCH,
	[VCS] = VCS_AS_CONTEXT_SWITCH,
	[VCS2] = VCS2_AS_CONTEXT_SWITCH,
	[VECS] = VECS_AS_CONTEXT_SWITCH,
};

static int ring_id_to_context_switch_event(int ring_id)
{
	if (WARN_ON(ring_id < RCS ||
			ring_id >= ARRAY_SIZE(context_switch_events)))
		return -EINVAL;

	return context_switch_events[ring_id];
}

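/*
 * Promote the pending execlist slot to running. If a context was
 * running, the running context is re-pointed at element 0 of the new
 * running slot.
 */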
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
{
	gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
			execlist->running_slot ?
			execlist->running_slot->index : -1,
			execlist->running_context ?
			execlist->running_context->context_id : 0,
			execlist->pending_slot ?
			execlist->pending_slot->index : -1);

	execlist->running_slot = execlist->pending_slot;
	execlist->pending_slot = NULL;
	execlist->running_context = execlist->running_context ?
		&execlist->running_slot->ctx[0] : NULL;

	gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
			execlist->running_slot ?
			execlist->running_slot->index : -1,
			execlist->running_context ?
			execlist->running_context->context_id : 0,
			execlist->pending_slot ?
			execlist->pending_slot->index : -1);
}

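/*
 * Rebuild the virtual EXECLIST_STATUS register from the tracked slot
 * state, so the guest sees pointer/active/valid bits that match the
 * emulated submission state.
 */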
static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *desc = execlist->running_context;
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct execlist_status_format status;
	int ring_id = execlist->ring_id;
	u32 status_reg = execlist_ring_mmio(vgpu->gvt,
			ring_id, _EL_OFFSET_STATUS);

	status.ldw = vgpu_vreg(vgpu, status_reg);
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (running) {
		status.current_execlist_pointer = !!running->index;
		status.execlist_write_pointer = !running->index;
		status.execlist_0_active = status.execlist_0_valid =
			!running->index;
		status.execlist_1_active = status.execlist_1_valid =
			!!running->index;
	} else {
		status.context_id = 0;
		status.execlist_0_active = status.execlist_0_valid = 0;
		status.execlist_1_active = status.execlist_1_valid = 0;
	}

	status.context_id = desc ? desc->context_id : 0;
	status.execlist_queue_full = !!(pending);

	vgpu_vreg(vgpu, status_reg) = status.ldw;
	vgpu_vreg(vgpu, status_reg + 4) = status.udw;

	gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
		vgpu->id, status_reg, status.ldw, status.udw);
}

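/*
 * Append one entry to the virtual context status buffer (CSB) and
 * advance the CSB write pointer. The CSB is handled here as a ring of
 * six two-dword entries; a write pointer of 0x7 is the just-reset
 * value. Optionally defer the context-switch interrupt so several CSB
 * entries can be posted before a single interrupt fires.
 */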
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
		struct execlist_context_status_format *status,
		bool trigger_interrupt_later)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	int ring_id = execlist->ring_id;
	struct execlist_context_status_pointer_format ctx_status_ptr;
	u32 write_pointer;
	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;

	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_PTR);
	ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_BUF);

	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);

	write_pointer = ctx_status_ptr.write_ptr;

	if (write_pointer == 0x7)
		write_pointer = 0;
	else {
		++write_pointer;
		write_pointer %= 0x6;
	}

	offset = ctx_status_buf_reg + write_pointer * 8;

	vgpu_vreg(vgpu, offset) = status->ldw;
	vgpu_vreg(vgpu, offset + 4) = status->udw;

	ctx_status_ptr.write_ptr = write_pointer;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;

	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
		vgpu->id, write_pointer, offset, status->ldw, status->udw);

	if (trigger_interrupt_later)
		return;

	intel_vgpu_trigger_virtual_event(vgpu,
			ring_id_to_context_switch_event(execlist->ring_id));
}

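/*
 * Emulate the CSB events generated when a context is scheduled out:
 * element switch, or active-to-idle/context-complete, depending on
 * which element finished and whether a pending execlist exists.
 */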
static int emulate_execlist_ctx_schedule_out(
		struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format *ctx)
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
	struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
	struct execlist_context_status_format status;

	memset(&status, 0, sizeof(status));

	gvt_dbg_el("schedule out context id %x\n", ctx->context_id);

	if (WARN_ON(!same_context(ctx, execlist->running_context))) {
		gvt_err("schedule out context is not running context, "
				"ctx id %x running ctx id %x\n",
				ctx->context_id,
				execlist->running_context->context_id);
		return -EINVAL;
	}

	/* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
	if (valid_context(ctx1) && same_context(ctx0, ctx)) {
		gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");

		execlist->running_context = ctx1;

		emulate_execlist_status(execlist);

		status.context_complete = status.element_switch = 1;
		status.context_id = ctx->context_id;

		emulate_csb_update(execlist, &status, false);
	/*
	 * ctx1 is not valid, ctx == ctx0
	 * ctx1 is valid, ctx1 == ctx
	 *	--> last element is finished
	 * emulate:
	 *	active-to-idle if there is *no* pending execlist
	 *	context-complete if there *is* pending execlist
	 */
	} else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
			|| (valid_context(ctx1) && same_context(ctx1, ctx))) {
		gvt_dbg_el("need to switch virtual execlist slot\n");

		switch_virtual_execlist_slot(execlist);

		emulate_execlist_status(execlist);

		status.context_complete = status.active_to_idle = 1;
		status.context_id = ctx->context_id;

		if (!pending) {
			emulate_csb_update(execlist, &status, false);
		} else {
			emulate_csb_update(execlist, &status, true);

			memset(&status, 0, sizeof(status));

			status.idle_to_active = 1;
			status.context_id = 0;

			emulate_csb_update(execlist, &status, false);
		}
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

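/*
 * Pick the slot the guest is about to fill: the execlist write pointer
 * in the virtual EXECLIST_STATUS register names it, unless both slots
 * are already in use.
 */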
static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
		struct intel_vgpu_execlist *execlist)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	int ring_id = execlist->ring_id;
	u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS);
	struct execlist_status_format status;

	status.ldw = vgpu_vreg(vgpu, status_reg);
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (status.execlist_queue_full) {
		gvt_err("virtual execlist slots are full\n");
		return NULL;
	}

	return &execlist->slot[status.execlist_write_pointer];
}

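/*
 * Emulate an ELSP submission: either start an idle engine
 * (idle-to-active), preempt via lite-restore, or just record the write
 * as the pending execlist.
 */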
static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format ctx[2])
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *slot =
		get_next_execlist_slot(execlist);

	struct execlist_ctx_descriptor_format *ctx0, *ctx1;
	struct execlist_context_status_format status;

	gvt_dbg_el("emulate schedule-in\n");

	if (!slot) {
		gvt_err("no available execlist slot\n");
		return -EINVAL;
	}

	memset(&status, 0, sizeof(status));
	memset(slot->ctx, 0, sizeof(slot->ctx));

	slot->ctx[0] = ctx[0];
	slot->ctx[1] = ctx[1];

	gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
			slot->index, ctx[0].context_id,
			ctx[1].context_id);

	/*
	 * no running execlist: this ELSP write becomes the running
	 * execlist -> idle-to-active
	 */
	if (!running) {
		gvt_dbg_el("no current running execlist\n");

		execlist->running_slot = slot;
		execlist->pending_slot = NULL;
		execlist->running_context = &slot->ctx[0];

		gvt_dbg_el("running slot index %d running context %x\n",
				execlist->running_slot->index,
				execlist->running_context->context_id);

		emulate_execlist_status(execlist);

		status.idle_to_active = 1;
		status.context_id = 0;

		emulate_csb_update(execlist, &status, false);
		return 0;
	}

	ctx0 = &running->ctx[0];
	ctx1 = &running->ctx[1];

	gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
		running->index, ctx0->context_id, ctx1->context_id);

	/*
	 * already has a running execlist
	 *	a. running ctx1 is valid,
	 *	   ctx0 is finished, and running ctx1 == new execlist ctx[0]
	 *	b. running ctx1 is not valid,
	 *	   ctx0 == new execlist ctx[0]
	 * ----> lite-restore + preempted
	 */
	if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
		/* condition a */
		(!same_context(ctx0, execlist->running_context))) ||
			(!valid_context(ctx1) &&
			 same_context(ctx0, &slot->ctx[0]))) { /* condition b */
		gvt_dbg_el("need to switch virtual execlist slot\n");

		execlist->pending_slot = slot;
		switch_virtual_execlist_slot(execlist);

		emulate_execlist_status(execlist);

		status.lite_restore = status.preempted = 1;
		status.context_id = ctx[0].context_id;

		emulate_csb_update(execlist, &status, false);
	} else {
		gvt_dbg_el("emulate as pending slot\n");
		/*
		 * otherwise: a pending execlist now exists, but there is
		 * no preemption to emulate
		 */
		execlist->pending_slot = slot;
		emulate_execlist_status(execlist);
	}
	return 0;
}

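/* Unpin and drop the shadow mm, then free the workload itself. */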
static void free_workload(struct intel_vgpu_workload *workload)
{
	intel_vgpu_unpin_mm(workload->shadow_mm);
	intel_gvt_mm_unreference(workload->shadow_mm);
	kmem_cache_free(workload->vgpu->workloads, workload);
}

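/*
 * The guest writes ELSP as four dwords, element 1's descriptor first
 * and element 0's last, so elsp_dwords.data[0..1] hold element 1 and
 * data[2..3] hold element 0. Callers below therefore fetch index 1 for
 * ctx[0] and index 0 for ctx[1].
 */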
#define get_desc_from_elsp_dwords(ed, i) \
	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))

static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct execlist_ctx_descriptor_format ctx[2];
	int ring_id = workload->ring_id;

	intel_vgpu_pin_mm(workload->shadow_mm);
	intel_vgpu_sync_oos_pages(workload->vgpu);
	intel_vgpu_flush_post_shadow(workload->vgpu);
	if (!workload->emulate_schedule_in)
		return 0;

	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);

	return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
}

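/*
 * Retire a finished workload. If the next queued workload runs the
 * same context, hardware would lite-restore rather than switch out, so
 * the schedule-out emulation is skipped; otherwise the schedule-out
 * CSB events are emulated before the workload is freed.
 */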
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_execlist *execlist =
		&vgpu->execlist[workload->ring_id];
	struct intel_vgpu_workload *next_workload;
	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
	bool lite_restore = false;
	int ret;

	gvt_dbg_el("complete workload %p status %d\n", workload,
			workload->status);

	if (workload->status)
		goto out;

	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
		struct execlist_ctx_descriptor_format *this_desc, *next_desc;

		next_workload = container_of(next,
				struct intel_vgpu_workload, list);
		this_desc = &workload->ctx_desc;
		next_desc = &next_workload->ctx_desc;

		lite_restore = same_context(this_desc, next_desc);
	}

	if (lite_restore) {
		gvt_dbg_el("next context == current - no schedule-out\n");
		free_workload(workload);
		return 0;
	}

	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
	if (ret)
		goto err;
out:
	free_workload(workload);
	return 0;
err:
	free_workload(workload);
	return ret;
}

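/* Byte offset of a field within the saved execlist ring context image. */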
#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

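/*
 * The ring context image stores the PDPs highest-first, starting at
 * pdp3_UDW, with each dword in an (offset, value) MMIO pair, hence the
 * 8-byte stride. Reading into pdp[7 - i] builds the array in
 * PDP0-first order.
 */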
static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

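/*
 * Find or create the shadow PPGTT mm for the page-table root that the
 * guest context descriptor carries.
 */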
static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	int page_table_level;
	u32 pdp[8];

	if (desc->addressing_mode == 1) { /* legacy 32-bit */
		page_table_level = 3;
	} else if (desc->addressing_mode == 3) { /* legacy 64-bit */
		page_table_level = 4;
	} else {
		gvt_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);

	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {
		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_err("fail to create mm object.\n");
			return PTR_ERR(mm);
		}
	}
	workload->shadow_mm = mm;
	return 0;
}

#define get_last_workload(q) \
	(list_empty(q) ? NULL : container_of(q->prev, \
	struct intel_vgpu_workload, list))

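/*
 * Build a workload from a guest context descriptor: locate the ring
 * context page behind the LRCA, snapshot the ring-buffer registers and
 * queue the workload for the scheduler.
 */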
static int submit_context(struct intel_vgpu *vgpu, int ring_id,
		struct execlist_ctx_descriptor_format *desc,
		bool emulate_schedule_in)
{
	struct list_head *q = workload_q_head(vgpu, ring_id);
	struct intel_vgpu_workload *last_workload = get_last_workload(q);
	struct intel_vgpu_workload *workload = NULL;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
		return -EINVAL;
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
		gvt_dbg_el("ctx head %x real head %lx\n", head,
				last_workload->rb_tail);
		/*
		 * cannot use guest context head pointer here,
		 * as it might not be updated at this time
		 */
		head = last_workload->rb_tail;
	}

	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

	workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
	if (!workload)
		return -ENOMEM;

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	INIT_LIST_HEAD(&workload->list);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->vgpu = vgpu;
	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;
	workload->prepare = prepare_execlist_workload;
	workload->complete = complete_execlist_workload;
	workload->status = -EINPROGRESS;
	workload->emulate_schedule_in = emulate_schedule_in;

	if (emulate_schedule_in)
		memcpy(&workload->elsp_dwords,
				&vgpu->execlist[ring_id].elsp_dwords,
				sizeof(workload->elsp_dwords));

	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
			workload, ring_id, head, tail, start, ctl);

	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
			emulate_schedule_in);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(vgpu->workloads, workload);
		return ret;
	}

	queue_workload(workload);
	return 0;
}

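/*
 * Entry point for a completed four-dword ELSP write: validate the two
 * descriptors and submit a workload per valid one. Only the first
 * submission of the pair emulates schedule-in.
 */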
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
	unsigned long valid_desc_bitmap = 0;
	bool emulate_schedule_in = true;
	int ret;
	int i;

	memset(valid_desc, 0, sizeof(valid_desc));

	desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
	desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);

	for (i = 0; i < 2; i++) {
		if (!desc[i]->valid)
			continue;

		if (!desc[i]->privilege_access) {
			gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
					vgpu->id);
			return -EINVAL;
		}

		/* TODO: add more guest context checks here. */
		set_bit(i, &valid_desc_bitmap);
		valid_desc[i] = *desc[i];
	}

	if (!valid_desc_bitmap) {
		gvt_err("vgpu%d: no valid desc in an elsp submission\n",
				vgpu->id);
		return -EINVAL;
	}

	if (!test_bit(0, (void *)&valid_desc_bitmap) &&
			test_bit(1, (void *)&valid_desc_bitmap)) {
		gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
				vgpu->id);
		return -EINVAL;
	}

	/* submit workload */
	for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
		ret = submit_context(vgpu, ring_id, &valid_desc[i],
				emulate_schedule_in);
		if (ret) {
			gvt_err("vgpu%d: fail to schedule workload\n",
					vgpu->id);
			return ret;
		}
		emulate_schedule_in = false;
	}
	return 0;
}

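/*
 * Reset one virtual execlist engine: clear both slots and park the
 * virtual CSB read/write pointers at 0x7, the post-reset value.
 */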
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct execlist_context_status_pointer_format ctx_status_ptr;
	u32 ctx_status_ptr_reg;

	memset(execlist, 0, sizeof(*execlist));

	execlist->vgpu = vgpu;
	execlist->ring_id = ring_id;
	execlist->slot[0].index = 0;
	execlist->slot[1].index = 1;

	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_PTR);

	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
	ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}

void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
	kmem_cache_destroy(vgpu->workloads);
}

int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
	int i;

	/* each ring has a virtual execlist engine */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		init_vgpu_execlist(vgpu, i);
		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
	}

	vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
			sizeof(struct intel_vgpu_workload), 0,
			SLAB_HWCACHE_ALIGN,
			NULL);

	if (!vgpu->workloads)
		return -ENOMEM;

	return 0;
}