/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
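
/*
 * Illustrative sketch (not part of this file): the guest i915 driver is
 * the consumer of the ballooning information populated above. Its
 * detection path reads the same fields back through MMIO, roughly like
 * the following; the guest-side helper name is an assumption here.
 *
 *	if (__raw_i915_read64(dev_priv, vgtif_reg(magic)) != VGT_MAGIC)
 *		return;	// not running on GVT-g, skip vGPU setup
 */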

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt : GVT device
 *
 * Initialize vGPU type list based on available resource.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail;
	unsigned int min_low;

	/* vGPU type name is defined as GVTg_Vx_y, where 'x' encodes the
	 * physical GPU generation and 'y' is the maximum number of vGPU
	 * instances a user can create on one physical GPU for this
	 * type.
	 *
	 * Depending on the physical SKU resource, we might see vGPU types
	 * like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
	 * different types of vGPU on the same physical GPU depending on
	 * the available resource. Each vGPU type has an "avail_instance"
	 * field to indicate how many vGPU instances can be created for
	 * this type.
	 *
	 * Currently we use a static size here as we init the types early.
	 */
	low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
	num_types = 4;

	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / min_low == 0)
			break;
		gvt->types[i].low_gm_size = min_low;
		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
		gvt->types[i].fence = 4;
		gvt->types[i].max_instance = low_avail / min_low;
		gvt->types[i].avail_instance = gvt->types[i].max_instance;

		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%u",
						gvt->types[i].max_instance);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%u",
						gvt->types[i].max_instance);

		min_low <<= 1;
		gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name, gvt->types[i].max_instance,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}

	gvt->num_types = i;
	return 0;
}
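
/*
 * Worked example of the sizing loop above (assuming, purely for
 * illustration, that HOST_LOW_GM_SIZE is 32MB): low_avail = 256MB - 32MB
 * = 224MB. With min_low doubling from 32MB each iteration, the loop
 * yields max_instance = 224/32 = 7, 224/64 = 3, 224/128 = 1, then stops
 * once 224/256 rounds to zero. On a Gen8 part the three resulting types
 * would be named GVTg_V4_7, GVTg_V4_3 and GVTg_V4_1.
 */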

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min, total_min;

	/* Need to depend on maximum hw resource size, but keep the
	 * static config for now.
	 */
	low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		total_min = min(min(low_gm_min, high_gm_min), fence_min);
		gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
						   total_min);

		gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name, gvt->types[i].max_instance,
			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}
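
/*
 * Worked example with illustrative numbers (not derived from real HW):
 * suppose 96MB of low GM, 1536MB of high GM and 24 fence registers
 * remain unallocated, and a type has low_gm_size = 64MB, high_gm_size =
 * 384MB, fence = 4. Then low_gm_min = 1, high_gm_min = 4, fence_min = 6,
 * so total_min = 1 and avail_instance = min(max_instance, 1): low
 * graphics memory is the limiting resource for that type.
 */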

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	vgpu->active = false;
	idr_remove(&gvt->vgpu_idr, vgpu->id);

	if (atomic_read(&vgpu->running_workload_num)) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_stop_schedule(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_gvt_context(vgpu);
	intel_vgpu_clean_execlist(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	vfree(vgpu);

	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_display(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_execlist(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_gvt_context(vgpu);
	if (ret)
		goto out_clean_execlist;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_shadow_ctx;

	vgpu->active = true;
	mutex_unlock(&gvt->lock);

	return vgpu;

out_clean_shadow_ctx:
	intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
	intel_vgpu_clean_execlist(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
				struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;

	/* XXX current param based on MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (IS_ERR(vgpu))
		return vgpu;

	/* calculate left instance change for types */
	intel_gvt_update_vgpu_types(gvt);

	return vgpu;
}
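
/*
 * Typical usage sketch (hypothetical caller, not from this file): a
 * control path creating an instance of the first advertised type and
 * destroying it again. Neither call may be made with gvt->lock held,
 * since both take it internally.
 *
 *	struct intel_vgpu *vgpu = intel_gvt_create_vgpu(gvt, &gvt->types[0]);
 *
 *	if (IS_ERR(vgpu))
 *		return PTR_ERR(vgpu);
 *	...
 *	intel_gvt_destroy_vgpu(vgpu);
 */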

/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when user wants to reset a virtual GPU through
 * device model reset or GT reset. The caller should hold the gvt lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
 * the whole vGPU to default state as when it is created. This vGPU function
 * is required both for functionality and security concerns. The ultimate goal
 * of vGPU FLR is to let virtual machines reuse a vGPU instance. When we
 * assign a vGPU to a virtual machine we must issue such a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
 * (Render, Blitter, Video, Video Enhancement), as defined by the GPU Spec.
 * Unlike the FLR, a GT reset only resets particular resources of a vGPU per
 * the reset request. The guest driver can issue a GT reset by programming
 * the virtual GDRST register to reset specific virtual GPU engines or all
 * engines.
 *
 * The parameter dmlr identifies whether we will do a DMLR or a GT reset.
 * The parameter engine_mask specifies the engines that need to be
 * reset. If ALL_ENGINES is given for engine_mask, the caller requests
 * a full GT reset and we will reset all virtual GPU engines. For FLR,
 * engine_mask is ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);
	vgpu->resetting = true;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * The current_vgpu will be set to NULL after stopping the
	 * scheduler when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);

	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_reset_gtt(vgpu, dmlr);
		intel_vgpu_reset_resource(vgpu);
		intel_vgpu_reset_mmio(vgpu);
		populate_pvinfo_page(vgpu);

		if (dmlr)
			intel_vgpu_reset_cfg_space(vgpu);
	}

	vgpu->resetting = false;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
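
/*
 * Illustrative calls (hypothetical, assuming the caller already holds
 * gvt->lock): a device model level reset ignores the engine mask, while
 * a GT reset takes a bitmask of engine ids, e.g.:
 *
 *	intel_gvt_reset_vgpu_locked(vgpu, true, 0);		// DMLR
 *	intel_gvt_reset_vgpu_locked(vgpu, false, BIT(RCS));	// render engine only
 */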

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to reset a virtual GPU.
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->gvt->lock);
}