pdp_pair[i].val = pdp[7 - i];
}
+static void update_shadow_pdps(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+
+ if (WARN_ON(!workload->shadow_mm))
+ return;
+
+ if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
+ return;
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap(page);
+ set_context_pdp_root_pointer(shadow_ring_context,
+ (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+ kunmap(page);
+}
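
update_shadow_pdps() is only meaningful once the shadow page tables are pinned, which is what the two WARN_ON checks guard. A minimal sketch of the intended call site, assuming the usual prepare_workload() flow and the intel_vgpu_pin_mm() helper (a hypothetical simplification, with the surrounding steps elided):

static int prepare_workload(struct intel_vgpu_workload *workload)
{
        int ret;

        /* Pin the shadow page tables first; the shadow PDP root
         * pointers are only stable after this succeeds. */
        ret = intel_vgpu_pin_mm(workload->shadow_mm);
        if (ret)
                return ret;

        /* Now the copy into the shadow ring context is safe. */
        update_shadow_pdps(workload);

        /* ... remaining workload preparation elided ... */
        return 0;
}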
+
+/*
+ * When populating the shadow context from the guest, we should not
+ * override the OA-related registers, so that they are not clobbered
+ * by the guest's OA configuration. This makes it possible to capture
+ * OA data on the host for both the host and guests.
+ */
+static void sr_oa_regs(struct intel_vgpu_workload *workload,
+ u32 *reg_state, bool save)
+{
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+ int i = 0;
+ u32 flex_mmio[] = {
+ i915_mmio_reg_offset(EU_PERF_CNTL0),
+ i915_mmio_reg_offset(EU_PERF_CNTL1),
+ i915_mmio_reg_offset(EU_PERF_CNTL2),
+ i915_mmio_reg_offset(EU_PERF_CNTL3),
+ i915_mmio_reg_offset(EU_PERF_CNTL4),
+ i915_mmio_reg_offset(EU_PERF_CNTL5),
+ i915_mmio_reg_offset(EU_PERF_CNTL6),
+ };
+
+ if (!workload || !reg_state || workload->ring_id != RCS)
+ return;
+
+ if (save) {
+ workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
+
+ for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+ u32 state_offset = ctx_flexeu0 + i * 2;
+
+ workload->flex_mmio[i] = reg_state[state_offset + 1];
+ }
+ } else {
+ reg_state[ctx_oactxctrl] =
+ i915_mmio_reg_offset(GEN8_OACTXCONTROL);
+ reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
+
+ for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+ u32 state_offset = ctx_flexeu0 + i * 2;
+ u32 mmio = flex_mmio[i];
+
+ reg_state[state_offset] = mmio;
+ reg_state[state_offset + 1] = workload->flex_mmio[i];
+ }
+ }
+}
+
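
The save/restore pair is meant to bracket the guest-to-shadow context copy in populate_shadow_context(). A sketch of that call pattern, assuming shadow_ring_context is the kmap'd LRC state page as in update_shadow_pdps() above:

        /* Save the host's OA programming before the copy... */
        sr_oa_regs(workload, (u32 *)shadow_ring_context, true);

        /* ... copy the guest ring context into shadow_ring_context ... */

        /* ... then restore it so host perf keeps working. */
        sr_oa_regs(workload, (u32 *)shadow_ring_context, false);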
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
+ .supports_lvds = true,
+ .has_channel_0 = true,
+};
+
+static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
+ .has_channel_1 = true,
};
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
bool has_channel_1; /* a33 does not have channel 1 */
bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */
bool needs_de_be_mux; /* sun6i needs mux to select backend */
+ bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
+ bool supports_lvds; /* Does the TCON support an LVDS output? */
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
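
With supports_lvds in place, common code can refuse to bring up LVDS on TCON variants that lack the output instead of relying on per-SoC checks. A minimal sketch of such a gate (the helper name and call site are hypothetical):

/* Hypothetical guard for the LVDS setup path: of the quirks shown
 * above, only sun8i_a83t_lcd_quirks sets supports_lvds. */
static int sun4i_tcon_check_lvds(struct sun4i_tcon *tcon)
{
        if (!tcon->quirks->supports_lvds)
                return -EINVAL;

        return 0;
}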
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
+int vmw_kms_suspend(struct drm_device *dev);
+int vmw_kms_resume(struct drm_device *dev);
+void vmw_kms_lost_device(struct drm_device *dev);
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
if (res->backup) {
ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
interruptible,
- res->dev_priv->has_mob);
+ res->dev_priv->has_mob,
+ false);
if (ret)
goto out_unreserve;
+
+ ctx->buf = vmw_dmabuf_reference(res->backup);
}
ret = vmw_resource_validate(res);
if (ret)
}
+/**
+ * vmw_kms_suspend - Save modesetting state and turn modesetting off.
+ *
+ * @dev: Pointer to the drm device
+ * Return: 0 on success. Negative error code on failure.
+ */
+int vmw_kms_suspend(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
+ if (IS_ERR(dev_priv->suspend_state)) {
+ int ret = PTR_ERR(dev_priv->suspend_state);
+
+ DRM_ERROR("Failed kms suspend: %d\n", ret);
+ dev_priv->suspend_state = NULL;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_kms_resume - Re-enable modesetting and restore state
+ *
+ * @dev: Pointer to the drm device
+ * Return: 0 on success. Negative error code on failure.
+ *
+ * State is resumed from a previous vmw_kms_suspend(). It's illegal
+ * to call this function without a previous vmw_kms_suspend().
+ */
+int vmw_kms_resume(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ int ret;
+
+ if (WARN_ON(!dev_priv->suspend_state))
+ return 0;
+
+ ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
+ dev_priv->suspend_state = NULL;
+
+ return ret;
+}
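
vmw_kms_suspend() and vmw_kms_resume() are intended to be called from the driver's PM paths. A hedged sketch of that wiring; the hook names and the surrounding quiesce/wake steps are assumptions, not the actual vmwgfx callbacks:

static int vmw_pm_freeze_sketch(struct device *kdev)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        int ret;

        /* Save the atomic state and disable all outputs. */
        ret = vmw_kms_suspend(dev);
        if (ret)
                return ret;

        /* ... quiesce the device ... */
        return 0;
}

static int vmw_pm_restore_sketch(struct device *kdev)
{
        struct drm_device *dev = dev_get_drvdata(kdev);

        /* ... wake the device ... */

        /* Replay the atomic state saved by vmw_kms_suspend(). */
        return vmw_kms_resume(dev);
}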
+
+/**
+ * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
+ *
+ * @dev: Pointer to the drm device
+ */
+void vmw_kms_lost_device(struct drm_device *dev)
+{
+ drm_atomic_helper_shutdown(dev);
+}
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct hdac_bus *bus = azx_bus(chip);
struct pci_dev *pci = chip->pci;
+ struct hda_codec *codec;
int dev = chip->dev_index;
+ int val;
int err;
hda->probe_continued = 1;
chip->running = 1;
azx_add_card_list(chip);
- snd_hda_set_power_save(&chip->bus, power_save * 1000);
+ val = power_save;
+#ifdef CONFIG_PM
+ if (pm_blacklist) {
+ const struct snd_pci_quirk *q;
+
+ q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+ if (q && val) {
+ dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+ q->subvendor, q->subdevice);
+ val = 0;
+ }
+ }
+#endif /* CONFIG_PM */
+ /*
+ * The discrete GPU cannot power down unless the HDA controller runtime
+ * suspends, so activate runtime PM on codecs even if power_save == 0.
+ */
+ if (use_vga_switcheroo(hda))
+ list_for_each_codec(codec, &chip->bus)
+ codec->auto_runtime_pm = 1;
+
- if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
+ snd_hda_set_power_save(&chip->bus, val * 1000);
+ if (azx_has_pm_runtime(chip))
pm_runtime_put_autosuspend(&pci->dev);
out_free:
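
power_save_blacklist is an ordinary snd_pci_quirk table matched against the PCI subsystem IDs via snd_pci_quirk_lookup(), so opting a board out of power_save only takes one entry. A sketch of the table shape; the entry below is illustrative, not taken from the real list:

static const struct snd_pci_quirk power_save_blacklist[] = {
        /* Hypothetical board where runtime power-save misbehaves. */
        SND_PCI_QUIRK(0x1234, 0x5678, "Example Board", 0),
        {} /* terminator */
};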