/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 * Eddie Dong <eddie.dong@intel.com>
 * Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 * Ping Gao <ping.a.gao@intel.com>
 * Zhi Wang <zhi.a.wang@intel.com>
 * Bing Niu <bing.niu@intel.com>
 */
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
38 static void clean_vgpu_mmio(struct intel_vgpu
*vgpu
)
40 vfree(vgpu
->mmio
.vreg
);
41 vgpu
->mmio
.vreg
= vgpu
->mmio
.sreg
= NULL
;
44 int setup_vgpu_mmio(struct intel_vgpu
*vgpu
)
46 struct intel_gvt
*gvt
= vgpu
->gvt
;
47 const struct intel_gvt_device_info
*info
= &gvt
->device_info
;
50 memset(vgpu
->mmio
.vreg
, 0, info
->mmio_size
* 2);
52 vgpu
->mmio
.vreg
= vzalloc(info
->mmio_size
* 2);
57 vgpu
->mmio
.sreg
= vgpu
->mmio
.vreg
+ info
->mmio_size
;
59 memcpy(vgpu
->mmio
.vreg
, gvt
->firmware
.mmio
, info
->mmio_size
);
60 memcpy(vgpu
->mmio
.sreg
, gvt
->firmware
.mmio
, info
->mmio_size
);
62 vgpu_vreg(vgpu
, GEN6_GT_THREAD_STATUS_REG
) = 0;
64 /* set the bit 0:2(Core C-State ) to C0 */
65 vgpu_vreg(vgpu
, GEN6_GT_CORE_STATUS
) = 0;
69 static void setup_vgpu_cfg_space(struct intel_vgpu
*vgpu
,
70 struct intel_vgpu_creation_params
*param
)
72 struct intel_gvt
*gvt
= vgpu
->gvt
;
73 const struct intel_gvt_device_info
*info
= &gvt
->device_info
;
77 memcpy(vgpu_cfg_space(vgpu
), gvt
->firmware
.cfg_space
,
78 info
->cfg_space_size
);
80 if (!param
->primary
) {
81 vgpu_cfg_space(vgpu
)[PCI_CLASS_DEVICE
] =
82 INTEL_GVT_PCI_CLASS_VGA_OTHER
;
83 vgpu_cfg_space(vgpu
)[PCI_CLASS_PROG
] =
84 INTEL_GVT_PCI_CLASS_VGA_OTHER
;
87 /* Show guest that there isn't any stolen memory.*/
88 gmch_ctl
= (u16
*)(vgpu_cfg_space(vgpu
) + INTEL_GVT_PCI_GMCH_CONTROL
);
89 *gmch_ctl
&= ~(BDW_GMCH_GMS_MASK
<< BDW_GMCH_GMS_SHIFT
);
91 intel_vgpu_write_pci_bar(vgpu
, PCI_BASE_ADDRESS_2
,
92 gvt_aperture_pa_base(gvt
), true);
94 vgpu_cfg_space(vgpu
)[PCI_COMMAND
] &= ~(PCI_COMMAND_IO
96 | PCI_COMMAND_MASTER
);
98 * Clear the bar upper 32bit and let guest to assign the new value
100 memset(vgpu_cfg_space(vgpu
) + PCI_BASE_ADDRESS_1
, 0, 4);
101 memset(vgpu_cfg_space(vgpu
) + PCI_BASE_ADDRESS_3
, 0, 4);
102 memset(vgpu_cfg_space(vgpu
) + INTEL_GVT_PCI_OPREGION
, 0, 4);
104 for (i
= 0; i
< INTEL_GVT_MAX_BAR_NUM
; i
++) {
105 vgpu
->cfg_space
.bar
[i
].size
= pci_resource_len(
106 gvt
->dev_priv
->drm
.pdev
, i
* 2);
107 vgpu
->cfg_space
.bar
[i
].tracked
= false;
111 void populate_pvinfo_page(struct intel_vgpu
*vgpu
)
113 /* setup the ballooning information */
114 vgpu_vreg64(vgpu
, vgtif_reg(magic
)) = VGT_MAGIC
;
115 vgpu_vreg(vgpu
, vgtif_reg(version_major
)) = 1;
116 vgpu_vreg(vgpu
, vgtif_reg(version_minor
)) = 0;
117 vgpu_vreg(vgpu
, vgtif_reg(display_ready
)) = 0;
118 vgpu_vreg(vgpu
, vgtif_reg(vgt_id
)) = vgpu
->id
;
119 vgpu_vreg(vgpu
, vgtif_reg(avail_rs
.mappable_gmadr
.base
)) =
120 vgpu_aperture_gmadr_base(vgpu
);
121 vgpu_vreg(vgpu
, vgtif_reg(avail_rs
.mappable_gmadr
.size
)) =
122 vgpu_aperture_sz(vgpu
);
123 vgpu_vreg(vgpu
, vgtif_reg(avail_rs
.nonmappable_gmadr
.base
)) =
124 vgpu_hidden_gmadr_base(vgpu
);
125 vgpu_vreg(vgpu
, vgtif_reg(avail_rs
.nonmappable_gmadr
.size
)) =
126 vgpu_hidden_sz(vgpu
);
128 vgpu_vreg(vgpu
, vgtif_reg(avail_rs
.fence_num
)) = vgpu_fence_sz(vgpu
);
130 gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu
->id
);
131 gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
132 vgpu_aperture_gmadr_base(vgpu
), vgpu_aperture_sz(vgpu
));
133 gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
134 vgpu_hidden_gmadr_base(vgpu
), vgpu_hidden_sz(vgpu
));
135 gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu
));
137 WARN_ON(sizeof(struct vgt_if
) != VGT_PVINFO_SIZE
);
141 * intel_gvt_init_vgpu_types - initialize vGPU type list
144 * Initialize vGPU type list based on available resource.
147 int intel_gvt_init_vgpu_types(struct intel_gvt
*gvt
)
149 unsigned int num_types
;
150 unsigned int i
, low_avail
;
151 unsigned int min_low
;
153 /* vGPU type name is defined as GVTg_Vx_y which contains
154 * physical GPU generation type and 'y' means maximum vGPU
155 * instances user can create on one physical GPU for this
158 * Depend on physical SKU resource, might see vGPU types like
159 * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
160 * different types of vGPU on same physical GPU depending on
161 * available resource. Each vGPU type will have "avail_instance"
162 * to indicate how many vGPU instance can be created for this
165 * Currently use static size here as we init type earlier..
167 low_avail
= MB_TO_BYTES(256) - HOST_LOW_GM_SIZE
;
170 gvt
->types
= kzalloc(num_types
* sizeof(struct intel_vgpu_type
),
175 min_low
= MB_TO_BYTES(32);
176 for (i
= 0; i
< num_types
; ++i
) {
177 if (low_avail
/ min_low
== 0)
179 gvt
->types
[i
].low_gm_size
= min_low
;
180 gvt
->types
[i
].high_gm_size
= 3 * gvt
->types
[i
].low_gm_size
;
181 gvt
->types
[i
].fence
= 4;
182 gvt
->types
[i
].max_instance
= low_avail
/ min_low
;
183 gvt
->types
[i
].avail_instance
= gvt
->types
[i
].max_instance
;
185 if (IS_GEN8(gvt
->dev_priv
))
186 sprintf(gvt
->types
[i
].name
, "GVTg_V4_%u",
187 gvt
->types
[i
].max_instance
);
188 else if (IS_GEN9(gvt
->dev_priv
))
189 sprintf(gvt
->types
[i
].name
, "GVTg_V5_%u",
190 gvt
->types
[i
].max_instance
);
193 gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
194 i
, gvt
->types
[i
].name
, gvt
->types
[i
].max_instance
,
195 gvt
->types
[i
].avail_instance
,
196 gvt
->types
[i
].low_gm_size
,
197 gvt
->types
[i
].high_gm_size
, gvt
->types
[i
].fence
);
204 void intel_gvt_clean_vgpu_types(struct intel_gvt
*gvt
)
209 static void intel_gvt_update_vgpu_types(struct intel_gvt
*gvt
)
212 unsigned int low_gm_avail
, high_gm_avail
, fence_avail
;
213 unsigned int low_gm_min
, high_gm_min
, fence_min
, total_min
;
215 /* Need to depend on maxium hw resource size but keep on
216 * static config for now.
218 low_gm_avail
= MB_TO_BYTES(256) - HOST_LOW_GM_SIZE
-
219 gvt
->gm
.vgpu_allocated_low_gm_size
;
220 high_gm_avail
= MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE
-
221 gvt
->gm
.vgpu_allocated_high_gm_size
;
222 fence_avail
= gvt_fence_sz(gvt
) - HOST_FENCE
-
223 gvt
->fence
.vgpu_allocated_fence_num
;
225 for (i
= 0; i
< gvt
->num_types
; i
++) {
226 low_gm_min
= low_gm_avail
/ gvt
->types
[i
].low_gm_size
;
227 high_gm_min
= high_gm_avail
/ gvt
->types
[i
].high_gm_size
;
228 fence_min
= fence_avail
/ gvt
->types
[i
].fence
;
229 total_min
= min(min(low_gm_min
, high_gm_min
), fence_min
);
230 gvt
->types
[i
].avail_instance
= min(gvt
->types
[i
].max_instance
,
233 gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
234 i
, gvt
->types
[i
].name
, gvt
->types
[i
].max_instance
,
235 gvt
->types
[i
].avail_instance
, gvt
->types
[i
].low_gm_size
,
236 gvt
->types
[i
].high_gm_size
, gvt
->types
[i
].fence
);
241 * intel_gvt_destroy_vgpu - destroy a virtual GPU
244 * This function is called when user wants to destroy a virtual GPU.
247 void intel_gvt_destroy_vgpu(struct intel_vgpu
*vgpu
)
249 struct intel_gvt
*gvt
= vgpu
->gvt
;
251 mutex_lock(&gvt
->lock
);
253 vgpu
->active
= false;
254 idr_remove(&gvt
->vgpu_idr
, vgpu
->id
);
256 if (atomic_read(&vgpu
->running_workload_num
)) {
257 mutex_unlock(&gvt
->lock
);
258 intel_gvt_wait_vgpu_idle(vgpu
);
259 mutex_lock(&gvt
->lock
);
262 intel_vgpu_stop_schedule(vgpu
);
263 intel_vgpu_clean_sched_policy(vgpu
);
264 intel_vgpu_clean_gvt_context(vgpu
);
265 intel_vgpu_clean_execlist(vgpu
);
266 intel_vgpu_clean_display(vgpu
);
267 intel_vgpu_clean_opregion(vgpu
);
268 intel_vgpu_clean_gtt(vgpu
);
269 intel_gvt_hypervisor_detach_vgpu(vgpu
);
270 intel_vgpu_free_resource(vgpu
);
271 clean_vgpu_mmio(vgpu
);
274 intel_gvt_update_vgpu_types(gvt
);
275 mutex_unlock(&gvt
->lock
);
278 static struct intel_vgpu
*__intel_gvt_create_vgpu(struct intel_gvt
*gvt
,
279 struct intel_vgpu_creation_params
*param
)
281 struct intel_vgpu
*vgpu
;
284 gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
285 param
->handle
, param
->low_gm_sz
, param
->high_gm_sz
,
288 vgpu
= vzalloc(sizeof(*vgpu
));
290 return ERR_PTR(-ENOMEM
);
292 mutex_lock(&gvt
->lock
);
294 ret
= idr_alloc(&gvt
->vgpu_idr
, vgpu
, 1, GVT_MAX_VGPU
, GFP_KERNEL
);
299 vgpu
->handle
= param
->handle
;
301 bitmap_zero(vgpu
->tlb_handle_pending
, I915_NUM_ENGINES
);
303 setup_vgpu_cfg_space(vgpu
, param
);
305 ret
= setup_vgpu_mmio(vgpu
);
309 ret
= intel_vgpu_alloc_resource(vgpu
, param
);
311 goto out_clean_vgpu_mmio
;
313 populate_pvinfo_page(vgpu
);
315 ret
= intel_gvt_hypervisor_attach_vgpu(vgpu
);
317 goto out_clean_vgpu_resource
;
319 ret
= intel_vgpu_init_gtt(vgpu
);
321 goto out_detach_hypervisor_vgpu
;
323 ret
= intel_vgpu_init_display(vgpu
);
327 ret
= intel_vgpu_init_execlist(vgpu
);
329 goto out_clean_display
;
331 ret
= intel_vgpu_init_gvt_context(vgpu
);
333 goto out_clean_execlist
;
335 ret
= intel_vgpu_init_sched_policy(vgpu
);
337 goto out_clean_shadow_ctx
;
340 mutex_unlock(&gvt
->lock
);
344 out_clean_shadow_ctx
:
345 intel_vgpu_clean_gvt_context(vgpu
);
347 intel_vgpu_clean_execlist(vgpu
);
349 intel_vgpu_clean_display(vgpu
);
351 intel_vgpu_clean_gtt(vgpu
);
352 out_detach_hypervisor_vgpu
:
353 intel_gvt_hypervisor_detach_vgpu(vgpu
);
354 out_clean_vgpu_resource
:
355 intel_vgpu_free_resource(vgpu
);
357 clean_vgpu_mmio(vgpu
);
360 mutex_unlock(&gvt
->lock
);
365 * intel_gvt_create_vgpu - create a virtual GPU
367 * @type: type of the vGPU to create
369 * This function is called when user wants to create a virtual GPU.
372 * pointer to intel_vgpu, error pointer if failed.
374 struct intel_vgpu
*intel_gvt_create_vgpu(struct intel_gvt
*gvt
,
375 struct intel_vgpu_type
*type
)
377 struct intel_vgpu_creation_params param
;
378 struct intel_vgpu
*vgpu
;
382 param
.low_gm_sz
= type
->low_gm_size
;
383 param
.high_gm_sz
= type
->high_gm_size
;
384 param
.fence_sz
= type
->fence
;
386 /* XXX current param based on MB */
387 param
.low_gm_sz
= BYTES_TO_MB(param
.low_gm_sz
);
388 param
.high_gm_sz
= BYTES_TO_MB(param
.high_gm_sz
);
390 vgpu
= __intel_gvt_create_vgpu(gvt
, ¶m
);
394 /* calculate left instance change for types */
395 intel_gvt_update_vgpu_types(gvt
);
401 * intel_gvt_reset_vgpu - reset a virtual GPU
404 * This function is called when user wants to reset a virtual GPU.
407 void intel_gvt_reset_vgpu(struct intel_vgpu
*vgpu
)