config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
+ depends on 64BIT
default n
help
Choose this option if you want to enable Intel GVT-g graphics
virtualization host support.
info->event = PRIMARY_B_FLIP_DONE;
break;
case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
- info->pipe = PIPE_B;
+ info->pipe = PIPE_C;
info->event = PRIMARY_C_FLIP_DONE;
break;
default:
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
struct intel_vgpu *vgpu = s->vgpu;
-#define write_bits(reg, e, s, v) do { \
- vgpu_vreg(vgpu, reg) &= ~GENMASK(e, s); \
- vgpu_vreg(vgpu, reg) |= (v << s); \
-} while (0)
-
- write_bits(info->surf_reg, 31, 12, info->surf_val);
- if (IS_SKYLAKE(dev_priv))
- write_bits(info->stride_reg, 9, 0, info->stride_val);
- else
- write_bits(info->stride_reg, 15, 6, info->stride_val);
- write_bits(info->ctrl_reg, IS_SKYLAKE(dev_priv) ? 12 : 10,
- 10, info->tile_val);
-
-#undef write_bits
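+ /* set_mask_bits(ptr, mask, bits) atomically replaces the bits covered
+ * by @mask with @bits, so the open-coded write_bits() macro is no
+ * longer needed.
+ */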
+ set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+ info->surf_val << 12);
+ if (IS_SKYLAKE(dev_priv)) {
+ set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+ info->stride_val);
+ set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+ info->tile_val << 10);
+ } else {
+ set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+ info->stride_val << 6);
+ set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+ info->tile_val << 10);
+ }
vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, info->event);
pte = readq(addr);
#else
pte = ioread32(addr);
- pte |= ioread32(addr + 4) << 32;
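+ /* ioread32() returns a 32-bit value; without the (u64) cast the shift
+ * by 32 would be done in 32-bit arithmetic and the high dword of the
+ * PTE would be lost.
+ */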
+ pte |= (u64)ioread32(addr + 4) << 32;
#endif
return pte;
}
mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate vaddr:0x%llx\n", (u64)vaddr);
+ gvt_err("fail to translate vaddr: 0x%p\n", vaddr);
__free_page(gtt->scratch_page);
gtt->scratch_page = NULL;
return -ENXIO;
*/
int intel_gvt_init_host(void)
{
+ int ret;
+
if (intel_gvt_host.initialized)
return 0;
return -EINVAL;
/* Try to detect if we're running in host instead of VM. */
- if (!intel_gvt_hypervisor_detect_host())
+ ret = intel_gvt_hypervisor_detect_host();
+ if (ret)
return -ENODEV;
gvt_dbg_core("Running with hypervisor %s in host mode\n",
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
info->cfg_space_size = 256;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
- info->msi_cap_offset = IS_SKYLAKE(gvt->dev_priv) ? 0xac : 0x90;
info->gtt_start_offset = 8 * 1024 * 1024;
info->gtt_entry_size = 8;
info->gtt_entry_size_shift = 3;
info->gmadr_bytes_in_cmd = 8;
info->max_surface_size = 36 * 1024 * 1024;
}
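+ /* The PCI core fills in pdev->msi_cap during device enumeration, so
+ * the per-platform hard-coded offsets (0x90 for BDW, 0xac for SKL)
+ * are no longer needed.
+ */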
+ info->msi_cap_offset = pdev->msi_cap;
}
static int gvt_service_thread(void *data)
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
+int setup_vgpu_mmio(struct intel_vgpu *vgpu);
+void populate_pvinfo_page(struct intel_vgpu *vgpu);
#include "mpt.h"
vgpu->resetting = true;
intel_vgpu_stop_schedule(vgpu);
- if (scheduler->current_vgpu == vgpu) {
+ /*
+ * The current_vgpu will be set to NULL after stopping the
+ * scheduler when the reset is triggered by the current vgpu.
+ */
+ if (scheduler->current_vgpu == NULL) {
mutex_unlock(&vgpu->gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&vgpu->gvt->lock);
intel_vgpu_reset_execlist(vgpu, bitmap);
+ /* full GPU reset */
+ if (bitmap == 0xff) {
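+ /* Tear down the vGPU GTT and rebuild the MMIO, PVINFO and GTT state. */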
+ mutex_unlock(&vgpu->gvt->lock);
+ intel_vgpu_clean_gtt(vgpu);
+ mutex_lock(&vgpu->gvt->lock);
+ setup_vgpu_mmio(vgpu);
+ populate_pvinfo_page(vgpu);
+ intel_vgpu_init_gtt(vgpu);
+ }
+
vgpu->resetting = false;
return 0;
u32 data;
u64 bitmap = 0;
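+ /* Latch the guest write into the vreg before decoding the reset bits. */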
+ write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & GEN6_GRDOM_FULL) {
int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
- int ret;
+ int ret = 0;
if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
return -EINVAL;
execlist = &vgpu->execlist[ring_id];
execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
- if (execlist->elsp_dwords.index == 3)
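+ /* An ELSP submission is four dwords; trigger the workload once the
+ * last dword has been written.
+ */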
+ if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+ if (ret)
+ gvt_err("fail to submit workload on ring %d\n", ring_id);
+ }
++execlist->elsp_dwords.index;
execlist->elsp_dwords.index &= 0x3;
- return 0;
+ return ret;
}
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
*/
void intel_gvt_clean_opregion(struct intel_gvt *gvt)
{
- iounmap(gvt->opregion.opregion_va);
+ memunmap(gvt->opregion.opregion_va);
gvt->opregion.opregion_va = NULL;
}
pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
&gvt->opregion.opregion_pa);
- gvt->opregion.opregion_va = acpi_os_ioremap(gvt->opregion.opregion_pa,
- INTEL_GVT_OPREGION_SIZE);
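+ /* The OpRegion lives in ordinary system RAM, so map it with
+ * memremap() rather than an I/O mapping.
+ */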
+ gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
+ INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
if (!gvt->opregion.opregion_va) {
gvt_err("fail to map host opregion\n");
return -EFAULT;
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum forcewake_domains fw;
i915_reg_t reg;
u32 regs[] = {
[RCS] = 0x4260,
reg = _MMIO(regs[ring_id]);
- I915_WRITE(reg, 0x1);
- if (wait_for_atomic((I915_READ(reg) == 0), 50))
+ /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+ * we need to put a forcewake when invalidating RCS TLB caches,
+ * otherwise device can go to RC6 state and interrupt invalidation
+ * process
+ */
+ fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ | FW_REG_WRITE);
+ if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+ fw |= FORCEWAKE_RENDER;
+
+ intel_uncore_forcewake_get(dev_priv, fw);
+
+ I915_WRITE_FW(reg, 0x1);
+
+ if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ intel_uncore_forcewake_put(dev_priv, fw);
+
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
if (!IS_SKYLAKE(dev_priv))
return;
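+ /* Start from this ring's MOCS register base before copying the 64 entries. */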
+ offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
gen9_render_mocs[ring_id][i] = I915_READ(offset);
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
if (!IS_SKYLAKE(dev_priv))
return;
+ offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
vgpu_vreg(vgpu, offset) = I915_READ(offset);
I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
struct intel_vgpu_workload *workload = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
gvt_dbg_core("workload thread for ring %d started\n", ring_id);
while (!kthread_should_stop()) {
- ret = wait_event_interruptible(scheduler->waitq[ring_id],
- kthread_should_stop() ||
- (workload = pick_next_workload(gvt, ring_id)));
-
- WARN_ON_ONCE(ret);
-
- if (kthread_should_stop())
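+ /*
+ * Register on the waitqueue before checking for work so a wakeup
+ * that arrives between the check and the sleep is not missed;
+ * wait_woken() handles the rest.
+ */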
+ add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ do {
+ workload = pick_next_workload(gvt, ring_id);
+ if (workload)
+ break;
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ } while (!kthread_should_stop());
+ remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+
+ if (!workload)
break;
mutex_lock(&scheduler_mutex);
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
-static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
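+/* Called from the vGPU reset path as well, so it is no longer static. */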
+int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
}
}
-static void populate_pvinfo_page(struct intel_vgpu *vgpu)
+void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;