X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fi915%2Fi915_drv.c;h=2d3c426465d3fc50048a306b35197c14b79b42e5;hb=a021880f78e6ab3dd6149c92725b84833002af1b;hp=1c75402a59c1377e7abd410f2dc58dcec0df7094;hpb=a8e28440016bfb23bec266c4c66eacca6ea2d48b;p=mirror_ubuntu-bionic-kernel.git

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1c75402a59c1..2d3c426465d3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -43,6 +43,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
@@ -316,10 +317,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
                 value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
                 break;
         case I915_PARAM_HUC_STATUS:
-                /* The register is already force-woken. We dont need
-                 * any rpm here
-                 */
+                intel_runtime_pm_get(dev_priv);
                 value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+                intel_runtime_pm_put(dev_priv);
                 break;
         case I915_PARAM_MMAP_GTT_VERSION:
                 /* Though we've started our numbering from 1, and so class all
@@ -348,6 +348,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
         case I915_PARAM_HAS_EXEC_HANDLE_LUT:
         case I915_PARAM_HAS_COHERENT_PHYS_GTT:
         case I915_PARAM_HAS_EXEC_SOFTPIN:
+        case I915_PARAM_HAS_EXEC_ASYNC:
+        case I915_PARAM_HAS_EXEC_FENCE:
+        case I915_PARAM_HAS_EXEC_CAPTURE:
                 /* For the time being all of these are always true;
                  * if some supported hardware does not have one of these
                  * features this value needs to be provided from
@@ -547,6 +550,7 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 static void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
         mutex_lock(&dev_priv->drm.struct_mutex);
+        intel_uc_fini_hw(dev_priv);
         i915_gem_cleanup_engines(dev_priv);
         i915_gem_context_fini(dev_priv);
         mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -565,9 +569,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
         if (i915_inject_load_failure())
                 return -ENODEV;
 
-        ret = intel_bios_init(dev_priv);
-        if (ret)
-                DRM_INFO("failed to find VBIOS tables\n");
+        intel_bios_init(dev_priv);
 
         /* If we have > 1 VGA cards, then we need to arbitrate access
          * to the common VGA resources.
@@ -605,12 +607,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
         if (ret)
                 goto cleanup_irq;
 
-        intel_huc_init(dev_priv);
-        intel_guc_init(dev_priv);
+        intel_uc_init_fw(dev_priv);
 
         ret = i915_gem_init(dev_priv);
         if (ret)
-                goto cleanup_irq;
+                goto cleanup_uc;
 
         intel_modeset_gem_init(dev);
 
@@ -632,9 +633,9 @@ cleanup_gem:
         if (i915_gem_suspend(dev_priv))
                 DRM_ERROR("failed to idle hardware; continuing to unload!\n");
         i915_gem_fini(dev_priv);
+cleanup_uc:
+        intel_uc_fini_fw(dev_priv);
 cleanup_irq:
-        intel_guc_fini(dev_priv);
-        intel_huc_fini(dev_priv);
         drm_irq_uninstall(dev);
         intel_teardown_gmbus(dev_priv);
 cleanup_csr:
@@ -754,6 +755,15 @@ out_err:
         return -ENOMEM;
 }
 
+static void i915_engines_cleanup(struct drm_i915_private *i915)
+{
+        struct intel_engine_cs *engine;
+        enum intel_engine_id id;
+
+        for_each_engine(engine, i915, id)
+                kfree(engine);
+}
+
 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
 {
         destroy_workqueue(dev_priv->hotplug.dp_wq);
@@ -767,10 +777,17 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
  */
 static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 {
-        if (IS_HSW_EARLY_SDV(dev_priv) ||
-            IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
+        bool pre = false;
+
+        pre |= IS_HSW_EARLY_SDV(dev_priv);
+        pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
+        pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+
+        if (pre) {
                 DRM_ERROR("This is a pre-production stepping. "
                           "It may not be fully functional.\n");
+                add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+        }
 }
 
 /**
@@ -806,9 +823,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
         spin_lock_init(&dev_priv->gpu_error.lock);
         mutex_init(&dev_priv->backlight_lock);
         spin_lock_init(&dev_priv->uncore.lock);
+        spin_lock_init(&dev_priv->mm.object_stat_lock);
         spin_lock_init(&dev_priv->mmio_flip_lock);
-        spin_lock_init(&dev_priv->wm.dsparb_lock);
         mutex_init(&dev_priv->sb_lock);
         mutex_init(&dev_priv->modeset_restore_lock);
         mutex_init(&dev_priv->av_mutex);
@@ -816,12 +833,11 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
         mutex_init(&dev_priv->pps_mutex);
 
         intel_uc_init_early(dev_priv);
-
         i915_memcpy_init_early(dev_priv);
 
         ret = i915_workqueues_init(dev_priv);
         if (ret < 0)
-                return ret;
+                goto err_engines;
 
         /* This must be called before any calls to HAS_PCH_* */
         intel_detect_pch(dev_priv);
@@ -850,6 +866,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
 err_workqueues:
         i915_workqueues_cleanup(dev_priv);
+err_engines:
+        i915_engines_cleanup(dev_priv);
         return ret;
 }
 
@@ -862,6 +880,7 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
         i915_perf_fini(dev_priv);
         i915_gem_load_cleanup(dev_priv);
         i915_workqueues_cleanup(dev_priv);
+        i915_engines_cleanup(dev_priv);
 }
 
 static int i915_mmio_setup(struct drm_i915_private *dev_priv)
@@ -925,13 +944,21 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
         ret = i915_mmio_setup(dev_priv);
         if (ret < 0)
-                goto put_bridge;
+                goto err_bridge;
 
         intel_uncore_init(dev_priv);
 
+        ret = intel_engines_init_mmio(dev_priv);
+        if (ret)
+                goto err_uncore;
+
+        i915_gem_init_mmio(dev_priv);
+
         return 0;
 
-put_bridge:
+err_uncore:
+        intel_uncore_fini(dev_priv);
+err_bridge:
         pci_dev_put(dev_priv->bridge_dev);
 
         return ret;
@@ -965,7 +992,9 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
         DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
         i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
-        DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores));
+        DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
+
+        intel_uc_sanitize_options(dev_priv);
 }
 
 /**
@@ -1165,7 +1194,6 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 
         i915_teardown_sysfs(dev_priv);
         i915_guc_log_unregister(dev_priv);
-        i915_debugfs_unregister(dev_priv);
         drm_dev_unregister(&dev_priv->drm);
 
         i915_gem_shrinker_cleanup(dev_priv);
@@ -1184,11 +1212,14 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
  */
 int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+        const struct intel_device_info *match_info =
+                (struct intel_device_info *)ent->driver_data;
         struct drm_i915_private *dev_priv;
         int ret;
 
-        if (i915.nuclear_pageflip)
-                driver.driver_features |= DRIVER_ATOMIC;
+        /* Enable nuclear pageflip on ILK+ */
+        if (!i915.nuclear_pageflip && match_info->gen < 5)
+                driver.driver_features &= ~DRIVER_ATOMIC;
 
         ret = -ENOMEM;
         dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
@@ -1196,8 +1227,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
         ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
         if (ret) {
                 DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
-                kfree(dev_priv);
-                return ret;
+                goto out_free;
         }
 
         dev_priv->drm.pdev = pdev;
@@ -1205,7 +1235,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
         ret = pci_enable_device(pdev);
         if (ret)
-                goto out_free_priv;
+                goto out_fini;
 
         pci_set_drvdata(pdev, &dev_priv->drm);
 
@@ -1269,9 +1299,11 @@ out_runtime_pm_put:
         i915_driver_cleanup_early(dev_priv);
 out_pci_disable:
         pci_disable_device(pdev);
-out_free_priv:
+out_fini:
         i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
-        drm_dev_unref(&dev_priv->drm);
+        drm_dev_fini(&dev_priv->drm);
+out_free:
+        kfree(dev_priv);
         return ret;
 }
 
@@ -1287,6 +1319,8 @@ void i915_driver_unload(struct drm_device *dev)
 
         intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
+        drm_atomic_helper_shutdown(dev);
+
         intel_gvt_cleanup(dev_priv);
 
         i915_driver_unregister(dev_priv);
@@ -1316,14 +1350,13 @@ void i915_driver_unload(struct drm_device *dev)
 
         /* Free error state after interrupts are fully disabled. */
         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-        i915_destroy_error_state(dev_priv);
+        i915_reset_error_state(dev_priv);
 
         /* Flush any outstanding unpin_work. */
         drain_workqueue(dev_priv->wq);
 
-        intel_guc_fini(dev_priv);
-        intel_huc_fini(dev_priv);
         i915_gem_fini(dev_priv);
+        intel_uc_fini_fw(dev_priv);
         intel_fbc_cleanup_cfb(dev_priv);
 
         intel_power_domains_fini(dev_priv);
@@ -1332,8 +1365,16 @@ void i915_driver_unload(struct drm_device *dev)
         i915_driver_cleanup_mmio(dev_priv);
 
         intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+}
+
+static void i915_driver_release(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = to_i915(dev);
 
         i915_driver_cleanup_early(dev_priv);
+        drm_dev_fini(&dev_priv->drm);
+
+        kfree(dev_priv);
 }
 
 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -1365,17 +1406,14 @@ static void i915_driver_lastclose(struct drm_device *dev)
         vga_switcheroo_process_delayed_switch();
 }
 
-static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
+static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
+        struct drm_i915_file_private *file_priv = file->driver_priv;
+
         mutex_lock(&dev->struct_mutex);
         i915_gem_context_close(dev, file);
         i915_gem_release(dev, file);
         mutex_unlock(&dev->struct_mutex);
-}
-
-static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
-        struct drm_i915_file_private *file_priv = file->driver_priv;
 
         kfree(file_priv);
 }
@@ -1434,8 +1472,6 @@ static int i915_drm_suspend(struct drm_device *dev)
                 goto out;
         }
 
-        intel_guc_suspend(dev_priv);
-
         intel_display_suspend(dev);
 
         intel_dp_mst_suspend(dev);
@@ -1454,7 +1490,7 @@ static int i915_drm_suspend(struct drm_device *dev)
         opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
         intel_opregion_notify_adapter(dev_priv, opregion_target_state);
 
-        intel_uncore_forcewake_reset(dev_priv, false);
+        intel_uncore_suspend(dev_priv);
         intel_opregion_unregister(dev_priv);
 
         intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1699,7 +1735,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
                 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
                           ret);
 
-        intel_uncore_early_sanitize(dev_priv, true);
+        intel_uncore_resume_early(dev_priv);
 
         if (IS_GEN9_LP(dev_priv)) {
                 if (!dev_priv->suspended_to_idle)
@@ -1715,6 +1751,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
             !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
                 intel_power_domains_init_hw(dev_priv, true);
 
+        i915_gem_sanitize(dev_priv);
+
         enable_rpm_wakeref_asserts(dev_priv);
 
 out:
@@ -1760,12 +1798,15 @@ void i915_reset(struct drm_i915_private *dev_priv)
         int ret;
 
         lockdep_assert_held(&dev_priv->drm.struct_mutex);
+        GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
 
-        if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
+        if (!test_bit(I915_RESET_HANDOFF, &error->flags))
                 return;
 
         /* Clear any previous failed attempts at recovery. Time to try again.
          */
-        __clear_bit(I915_WEDGED, &error->flags);
+        if (!i915_gem_unset_wedged(dev_priv))
+                goto wakeup;
+
         error->reset_count++;
 
         pr_notice("drm/i915: Resetting chip after gpu hang\n");
@@ -1811,15 +1852,18 @@ void i915_reset(struct drm_i915_private *dev_priv)
 
         i915_queue_hangcheck(dev_priv);
 
-wakeup:
+finish:
         i915_gem_reset_finish(dev_priv);
         enable_irq(dev_priv->drm.irq);
-        wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
+
+wakeup:
+        clear_bit(I915_RESET_HANDOFF, &error->flags);
+        wake_up_bit(&error->flags, I915_RESET_HANDOFF);
         return;
 
 error:
         i915_gem_set_wedged(dev_priv);
-        goto wakeup;
+        goto finish;
 }
 
 static int i915_pm_suspend(struct device *kdev)
@@ -2134,6 +2178,20 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
         I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
 }
 
+static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
+                                  u32 mask, u32 val)
+{
+        /* The HW does not like us polling for PW_STATUS frequently, so
+         * use the sleeping loop rather than risk the busy spin within
+         * intel_wait_for_register().
+         *
+         * Transitioning between RC6 states should be at most 2ms (see
+         * valleyview_enable_rps) so use a 3ms timeout.
+         */
+        return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
+                        3);
+}
+
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
 {
         u32 val;
@@ -2162,8 +2220,9 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
 
 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
 {
+        u32 mask;
         u32 val;
-        int err = 0;
+        int err;
 
         val = I915_READ(VLV_GTLC_WAKE_CTRL);
         val &= ~VLV_GTLC_ALLOWWAKEREQ;
@@ -2172,45 +2231,32 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
         I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
         POSTING_READ(VLV_GTLC_WAKE_CTRL);
 
-        err = intel_wait_for_register(dev_priv,
-                                      VLV_GTLC_PW_STATUS,
-                                      VLV_GTLC_ALLOWWAKEACK,
-                                      allow,
-                                      1);
+        mask = VLV_GTLC_ALLOWWAKEACK;
+        val = allow ? mask : 0;
+
+        err = vlv_wait_for_pw_status(dev_priv, mask, val);
         if (err)
                 DRM_ERROR("timeout disabling GT waking\n");
 
         return err;
 }
 
-static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
-                                 bool wait_for_on)
+static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+                                  bool wait_for_on)
 {
         u32 mask;
         u32 val;
-        int err;
 
         mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
         val = wait_for_on ? mask : 0;
-        if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
-                return 0;
-
-        DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
-                      onoff(wait_for_on),
-                      I915_READ(VLV_GTLC_PW_STATUS));
 
         /*
          * RC6 transitioning can be delayed up to 2 msec (see
          * valleyview_enable_rps), use 3 msec for safety.
          */
-        err = intel_wait_for_register(dev_priv,
-                                      VLV_GTLC_PW_STATUS, mask, val,
-                                      3);
-        if (err)
+        if (vlv_wait_for_pw_status(dev_priv, mask, val))
                 DRM_ERROR("timeout waiting for GT wells to go %s\n",
                           onoff(wait_for_on));
-
-        return err;
 }
 
 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@ -2231,7 +2277,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
          * Bspec defines the following GT well on flags as debug only, so
          * don't treat them as hard failures.
          */
-        (void)vlv_wait_for_gt_wells(dev_priv, false);
+        vlv_wait_for_gt_wells(dev_priv, false);
 
         mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
         WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
@@ -2342,7 +2388,7 @@ static int intel_runtime_suspend(struct device *kdev)
                 return ret;
         }
 
-        intel_uncore_forcewake_reset(dev_priv, false);
+        intel_uncore_suspend(dev_priv);
         enable_rpm_wakeref_asserts(dev_priv);
 
         WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -2532,7 +2578,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
         DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
         DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
-        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
         DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
         DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
         DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -2574,10 +2620,10 @@ static struct drm_driver driver = {
          */
         .driver_features =
             DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
-            DRIVER_RENDER | DRIVER_MODESET,
+            DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
+        .release = i915_driver_release,
         .open = i915_driver_open,
         .lastclose = i915_driver_lastclose,
-        .preclose = i915_driver_preclose,
         .postclose = i915_driver_postclose,
         .set_busid = drm_pci_set_busid,
 
@@ -2603,3 +2649,7 @@ static struct drm_driver driver = {
         .minor = DRIVER_MINOR,
         .patchlevel = DRIVER_PATCHLEVEL,
 };
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_drm.c"
+#endif