Merge tag 'drm-misc-next-2019-10-09-2' of git://anongit.freedesktop.org/drm/drm-misc...
author    Dave Airlie <airlied@redhat.com>
          Thu, 10 Oct 2019 23:30:52 +0000 (09:30 +1000)
committer Dave Airlie <airlied@redhat.com>
          Thu, 10 Oct 2019 23:30:53 +0000 (09:30 +1000)
drm-misc-next for 5.5:

UAPI Changes:
-Colorspace: Expose different prop values for DP vs. HDMI (Gwan-gyeong Mun), see the userspace sketch after this list
-fourcc: Add DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED (Raymond)
-not_actually: s/ENOTSUPP/EOPNOTSUPP/ in drm_edid and drm_mipi_dbi. This should
    not reach userspace, but adding here to specifically call that out (Daniel)
-i810: Prevent underflow in dispatch ioctls (Dan)
-komeda: Add ACLK sysfs attribute (Mihail)
-v3d: Allow userspace to clean up after render jobs (Iago)
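
For the Colorspace item above, a minimal userspace sketch (not taken from
this series) of how a client might pick one of the connector-specific
"Colorspace" enum values by name.  The libdrm calls are the standard
property API; the helper name set_colorspace() and the enum string
"BT2020_RGB" in the example call are illustrative assumptions only, real
clients should use whatever enum names the driver reports.

  #include <stdint.h>
  #include <string.h>
  #include <xf86drm.h>
  #include <xf86drmMode.h>

  static int set_colorspace(int fd, uint32_t connector_id, const char *name)
  {
          drmModeObjectProperties *props;
          int ret = -1;
          uint32_t i;
          int j;

          props = drmModeObjectGetProperties(fd, connector_id,
                                             DRM_MODE_OBJECT_CONNECTOR);
          if (!props)
                  return -1;

          for (i = 0; i < props->count_props && ret < 0; i++) {
                  drmModePropertyRes *p = drmModeGetProperty(fd, props->props[i]);

                  if (!p)
                          continue;
                  if (strcmp(p->name, "Colorspace") == 0) {
                          for (j = 0; j < p->count_enums; j++) {
                                  if (strcmp(p->enums[j].name, name) == 0) {
                                          /* commit the enum value on the connector */
                                          ret = drmModeObjectSetProperty(fd,
                                                  connector_id,
                                                  DRM_MODE_OBJECT_CONNECTOR,
                                                  p->prop_id,
                                                  p->enums[j].value);
                                          break;
                                  }
                          }
                  }
                  drmModeFreeProperty(p);
          }
          drmModeFreeObjectProperties(props);
          return ret;
  }

Typical use would be something like set_colorspace(drm_fd, connector_id,
"BT2020_RGB") on a DP or HDMI connector that exposes that name.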

Cross-subsystem Changes:
-MAINTAINERS:
 -Add Alyssa & Steven as panfrost reviewers (Rob)
 -Add Jernej as DE2 reviewer (Maxime)
 -Add Chen-Yu as Allwinner maintainer (Maxime)
-staging: Make some stack arrays static const (Colin)

Core Changes:
-ttm: Allow drivers to specify their vma manager (to use gem mgr) (Gerd)
-docs: Various fixes in connector/encoder/bridge docs (Daniel, Lyude, Laurent)
-connector: Allow more than 3 possible encoders for a connector (José), see the iteration sketch after this list
-dp_cec: Allow a connector to be associated with a cec device (Dariusz)
-various: Fix some compile/sparse warnings (Ville)
-mm: Ensure mm node removals are properly serialised (Chris)
-panel: Specify the type of panel for drm_panels for later use (Laurent)
-panel: Use drm_panel_init to init device and funcs (Laurent)
-mst: Refactors and cleanups in anticipation of suspend/resume support (Lyude)
-vram:
 -Add lazy unmapping for gem bo's (Thomas)
 -Unify and rationalize vram mm and gem vram (Thomas)
 -Expose vmap and vunmap for gem vram objects (Thomas)
 -Allow objects to be pinned at the top of vram to avoid fragmentation (Thomas)
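
For the possible-encoders item above, the encoder_ids[] array is gone and
drivers walk a connector's possible encoders with the two-argument form of
drm_connector_for_each_possible_encoder(), the same iterator the amdgpu_dm
hunk further down in this diff switches to.  A minimal sketch, with an
illustrative helper name; most KMS connectors have exactly one encoder:

  #include <drm/drm_connector.h>
  #include <drm/drm_encoder.h>

  static struct drm_encoder *
  example_first_possible_encoder(struct drm_connector *connector)
  {
          struct drm_encoder *encoder;

          /* return the first (usually only) encoder attached to the connector */
          drm_connector_for_each_possible_encoder(connector, encoder)
                  return encoder;

          return NULL;
  }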

Driver Changes:
-various: Include drm_bridge.h instead of relying on drm_crtc.h (Boris)
-ast/mgag200: Refactor show_cursor(), move cursor to top of video mem (Thomas)
-komeda:
 -Add error event printing (behind CONFIG) and reg dump support (Lowry)
 -Add suspend/resume support (Lowry)
 -Workaround D71 shadow registers not flushing on disable (Lowry)
-meson: Add suspend/resume support (Neil), see the helper sketch after this list
-omap: Miscellaneous refactors and improvements (Tomi/Jyri)
-panfrost/shmem: Silence lockdep by using mutex_trylock (Rob)
-panfrost: Miscellaneous small fixes (Rob/Steven)
-sti: Fix warnings (Benjamin/Linus)
-sun4i:
 -Add vcc-dsi regulator to sun6i_mipi_dsi (Jagan)
 -A few patches to figure out the DRQ/start delay calc on dsi (Jagan/Icenowy)
-virtio:
 -Add module param to switch resource reuse workaround on/off (Gerd)
 -Avoid calling vmexit while holding spinlock (Gerd)
 -Use gem shmem helpers instead of ttm (Gerd)
 -Accommodate command buffer allocations too big for cma (David)
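
For the meson and komeda suspend/resume items above, such hooks are
normally built on the generic drm_mode_config_helper_{suspend,resume}()
helpers.  A sketch of that shape only, assuming the driver stores its
drm_device as drvdata; the function and structure names are illustrative,
not taken from the actual patches:

  #include <linux/device.h>
  #include <linux/pm.h>
  #include <drm/drm_device.h>
  #include <drm/drm_modeset_helper.h>

  static int __maybe_unused example_drm_suspend(struct device *dev)
  {
          struct drm_device *drm = dev_get_drvdata(dev);

          /* disable all CRTCs and stash the atomic state for resume */
          return drm_mode_config_helper_suspend(drm);
  }

  static int __maybe_unused example_drm_resume(struct device *dev)
  {
          struct drm_device *drm = dev_get_drvdata(dev);

          /* re-commit the state saved at suspend time */
          return drm_mode_config_helper_resume(drm);
  }

  static SIMPLE_DEV_PM_OPS(example_drm_pm_ops,
                           example_drm_suspend, example_drm_resume);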

Cc: Rob Herring <robh@kernel.org>
Cc: Maxime Ripard <mripard@kernel.org>
Cc: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Cc: Lyude Paul <lyude@redhat.com>
Cc: José Roberto de Souza <jose.souza@intel.com>
Cc: Dariusz Marcinkiewicz <darekm@google.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Raymond Smith <raymond.smith@arm.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Mihail Atanassov <Mihail.Atanassov@arm.com>
Cc: Lowry Li <Lowry.Li@arm.com>
Cc: Neil Armstrong <narmstrong@baylibre.com>
Cc: Jyri Sarha <jsarha@ti.com>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
Cc: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Benjamin Gaignard <benjamin.gaignard@st.com>
Cc: Linus Walleij <linus.walleij@linaro.org>
Cc: Jagan Teki <jagan@amarulasolutions.com>
Cc: Icenowy Zheng <icenowy@aosc.io>
Cc: Iago Toral Quiroga <itoral@igalia.com>
Cc: David Riley <davidriley@chromium.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>
# gpg: Signature made Thu 10 Oct 2019 01:00:47 AM AEST
# gpg:                using RSA key 732C002572DCAF79
# gpg: Can't check signature: public key not found

# Conflicts:
# drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
# drivers/gpu/drm/i915/i915_drv.c
# drivers/gpu/drm/i915/i915_gem.c
# drivers/gpu/drm/i915/i915_gem_gtt.c
# drivers/gpu/drm/i915/i915_vma.c
From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20191009150825.GA227673@art_vandelay
MAINTAINERS
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_hdmi.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/omapdrm/dss/dss.c

diff --combined MAINTAINERS
index 55199ef7fa744cd7f8bd3cef1bb88c76880542b0,94fb077c0817715e2891ee91f365cde48416e44a..07ad2f9d2d200815f29be043ca176cfb22175001
@@@ -1272,6 -1272,8 +1272,8 @@@ F:      Documentation/gpu/afbc.rs
  ARM MALI PANFROST DRM DRIVER
  M:    Rob Herring <robh@kernel.org>
  M:    Tomeu Vizoso <tomeu.vizoso@collabora.com>
+ R:    Steven Price <steven.price@arm.com>
+ R:    Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
  L:    dri-devel@lists.freedesktop.org
  S:    Supported
  T:    git git://anongit.freedesktop.org/drm/drm-misc
@@@ -5376,12 -5378,22 +5378,22 @@@ F:   include/linux/vga
  
  DRM DRIVERS FOR ALLWINNER A10
  M:    Maxime Ripard <mripard@kernel.org>
+ M:    Chen-Yu Tsai <wens@csie.org>
  L:    dri-devel@lists.freedesktop.org
  S:    Supported
  F:    drivers/gpu/drm/sun4i/
  F:    Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  
+ DRM DRIVER FOR ALLWINNER DE2 AND DE3 ENGINE
+ M:    Maxime Ripard <mripard@kernel.org>
+ M:    Chen-Yu Tsai <wens@csie.org>
+ R:    Jernej Skrabec <jernej.skrabec@siol.net>
+ L:    dri-devel@lists.freedesktop.org
+ S:    Supported
+ F:    drivers/gpu/drm/sun4i/sun8i*
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
  DRM DRIVERS FOR AMLOGIC SOCS
  M:    Neil Armstrong <narmstrong@baylibre.com>
  L:    dri-devel@lists.freedesktop.org
@@@ -6112,10 -6124,7 +6124,10 @@@ M:    Gao Xiang <gaoxiang25@huawei.com
  M:    Chao Yu <yuchao0@huawei.com>
  L:    linux-erofs@lists.ozlabs.org
  S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
 +F:    Documentation/filesystems/erofs.txt
  F:    fs/erofs/
 +F:    include/trace/events/erofs.h
  
  ERRSEQ ERROR TRACKING INFRASTRUCTURE
  M:    Jeff Layton <jlayton@kernel.org>
@@@ -9078,7 -9087,6 +9090,7 @@@ F:      security/keys
  KGDB / KDB /debug_core
  M:    Jason Wessel <jason.wessel@windriver.com>
  M:    Daniel Thompson <daniel.thompson@linaro.org>
 +R:    Douglas Anderson <dianders@chromium.org>
  W:    http://kgdb.wiki.kernel.org/
  L:    kgdb-bugreport@lists.sourceforge.net
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
index 6f8aaf655a9fc36e3ee3022f1c9c861091eb0425,3fae1007143e398b5dd1261be472741ffe20f957..baf32484b8208ecad80b7c30c92a7c627d6966e4
   * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
   * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
   * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
 + * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
   */
  #define KMS_DRIVER_MAJOR      3
 -#define KMS_DRIVER_MINOR      34
 +#define KMS_DRIVER_MINOR      35
  #define KMS_DRIVER_PATCHLEVEL 0
  
  #define AMDGPU_MAX_TIMEOUT_PARAM_LENTH        256
@@@ -1049,7 -1048,7 +1049,7 @@@ static int amdgpu_pci_probe(struct pci_
        }
  
        /* Get rid of things like offb */
-       ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
+       ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
        if (ret)
                return ret;
  
index a52f0b13a2c8a1e6ea8f6682fffc74a2bb114319,f4e0f27a76de4ebc970849b620f47c7069eb2ea7..c67d3c41db1985a0ab1b593a05a0d178cc283389
@@@ -2385,6 -2385,8 +2385,6 @@@ static int amdgpu_dm_initialize_drm_dev
  
        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
 -      if (adev->asic_type == CHIP_RENOIR)
 -              dm->dc->debug.disable_stutter = true;
  
        return 0;
  fail:
@@@ -4837,7 -4839,13 +4837,13 @@@ static int to_drm_connector_type(enum s
  
  static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
  {
-       return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+       struct drm_encoder *encoder;
+       /* There is only one encoder per connector */
+       drm_connector_for_each_possible_encoder(connector, encoder)
+               return encoder;
+       return NULL;
  }
  
  static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
@@@ -6017,9 -6025,7 +6023,9 @@@ static void amdgpu_dm_enable_crtc_inter
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;
 +#ifdef CONFIG_DEBUG_FS
        enum amdgpu_dm_pipe_crc_source source;
 +#endif
  
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
index 38aa09cfbed3e14619096d29d228b00e8e598b83,2950e9308a4f77239dae51096a4c37772f15e3fd..0e45c61d733143b83158d1ad5e44642c1280d737
  
  #define DP_DPRX_ESI_LEN 14
  
 -/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
 -#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER    61440
 -#define DP_DSC_MIN_SUPPORTED_BPC              8
 -#define DP_DSC_MAX_SUPPORTED_BPC              10
 -
  /* DP DSC throughput values used for slice count calculations KPixels/s */
  #define DP_DSC_PEAK_PIXEL_RATE                        2720000
  #define DP_DSC_MAX_ENC_THROUGHPUT_0           340000
  #define DP_DSC_MAX_ENC_THROUGHPUT_1           400000
  
 -/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
 -#define DP_DSC_FEC_OVERHEAD_FACTOR            976
 +/* DP DSC FEC Overhead factor = 1/(0.972261) */
 +#define DP_DSC_FEC_OVERHEAD_FACTOR            972261
  
  /* Compliance test status bits  */
  #define INTEL_DP_RESOLUTION_SHIFT_MASK        0
@@@ -489,108 -494,6 +489,108 @@@ int intel_dp_get_link_train_fallback_va
        return 0;
  }
  
 +u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
 +{
 +      return div_u64(mul_u32_u32(mode_clock, 1000000U),
 +                     DP_DSC_FEC_OVERHEAD_FACTOR);
 +}
 +
 +static int
 +small_joiner_ram_size_bits(struct drm_i915_private *i915)
 +{
 +      if (INTEL_GEN(i915) >= 11)
 +              return 7680 * 8;
 +      else
 +              return 6144 * 8;
 +}
 +
 +static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
 +                                     u32 link_clock, u32 lane_count,
 +                                     u32 mode_clock, u32 mode_hdisplay)
 +{
 +      u32 bits_per_pixel, max_bpp_small_joiner_ram;
 +      int i;
 +
 +      /*
 +       * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
 +       * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
 +       * for SST -> TimeSlotsPerMTP is 1,
 +       * for MST -> TimeSlotsPerMTP has to be calculated
 +       */
 +      bits_per_pixel = (link_clock * lane_count * 8) /
 +                       intel_dp_mode_to_fec_clock(mode_clock);
 +      DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
 +
 +      /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
 +      max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
 +              mode_hdisplay;
 +      DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
 +
 +      /*
 +       * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
 +       * check, output bpp from small joiner RAM check)
 +       */
 +      bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
 +
 +      /* Error out if the max bpp is less than smallest allowed valid bpp */
 +      if (bits_per_pixel < valid_dsc_bpp[0]) {
 +              DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
 +                            bits_per_pixel, valid_dsc_bpp[0]);
 +              return 0;
 +      }
 +
 +      /* Find the nearest match in the array of known BPPs from VESA */
 +      for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
 +              if (bits_per_pixel < valid_dsc_bpp[i + 1])
 +                      break;
 +      }
 +      bits_per_pixel = valid_dsc_bpp[i];
 +
 +      /*
 +       * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
 +       * fractional part is 0
 +       */
 +      return bits_per_pixel << 4;
 +}
 +
 +static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
 +                                     int mode_clock, int mode_hdisplay)
 +{
 +      u8 min_slice_count, i;
 +      int max_slice_width;
 +
 +      if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
 +              min_slice_count = DIV_ROUND_UP(mode_clock,
 +                                             DP_DSC_MAX_ENC_THROUGHPUT_0);
 +      else
 +              min_slice_count = DIV_ROUND_UP(mode_clock,
 +                                             DP_DSC_MAX_ENC_THROUGHPUT_1);
 +
 +      max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
 +      if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
 +              DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
 +                            max_slice_width);
 +              return 0;
 +      }
 +      /* Also take into account max slice width */
 +      min_slice_count = min_t(u8, min_slice_count,
 +                              DIV_ROUND_UP(mode_hdisplay,
 +                                           max_slice_width));
 +
 +      /* Find the closest match to the valid slice count values */
 +      for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
 +              if (valid_dsc_slicecount[i] >
 +                  drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
 +                                                  false))
 +                      break;
 +              if (min_slice_count  <= valid_dsc_slicecount[i])
 +                      return valid_dsc_slicecount[i];
 +      }
 +
 +      DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
 +      return 0;
 +}
 +
  static enum drm_mode_status
  intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
                                                                true);
                } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
                        dsc_max_output_bpp =
 -                              intel_dp_dsc_get_output_bpp(max_link_clock,
 +                              intel_dp_dsc_get_output_bpp(dev_priv,
 +                                                          max_link_clock,
                                                            max_lanes,
                                                            target_clock,
                                                            mode->hdisplay) >> 4;
        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;
  
 -      return MODE_OK;
 +      return intel_mode_valid_max_plane_size(dev_priv, mode);
  }
  
  u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
@@@ -739,14 -641,12 +739,14 @@@ vlv_power_sequencer_kick(struct intel_d
        u32 DP;
  
        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
 -               "skipping pipe %c power sequencer kick due to port %c being active\n",
 -               pipe_name(pipe), port_name(intel_dig_port->base.port)))
 +               "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
 +               pipe_name(pipe), intel_dig_port->base.base.base.id,
 +               intel_dig_port->base.base.name))
                return;
  
 -      DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
 -                    pipe_name(pipe), port_name(intel_dig_port->base.port));
 +      DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
 +                    pipe_name(pipe), intel_dig_port->base.base.base.id,
 +                    intel_dig_port->base.base.name);
  
        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
@@@ -864,10 -764,9 +864,10 @@@ vlv_power_sequencer_pipe(struct intel_d
        vlv_steal_power_sequencer(dev_priv, pipe);
        intel_dp->pps_pipe = pipe;
  
 -      DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
 +      DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
                      pipe_name(intel_dp->pps_pipe),
 -                    port_name(intel_dig_port->base.port));
 +                    intel_dig_port->base.base.base.id,
 +                    intel_dig_port->base.base.name);
  
        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(intel_dp);
@@@ -975,16 -874,13 +975,16 @@@ vlv_initial_power_sequencer_setup(struc
  
        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
 -              DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
 -                            port_name(port));
 +              DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
 +                            intel_dig_port->base.base.base.id,
 +                            intel_dig_port->base.base.name);
                return;
        }
  
 -      DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
 -                    port_name(port), pipe_name(intel_dp->pps_pipe));
 +      DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
 +                    intel_dig_port->base.base.base.id,
 +                    intel_dig_port->base.base.name,
 +                    pipe_name(intel_dp->pps_pipe));
  
        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
@@@ -1347,12 -1243,13 +1347,12 @@@ intel_dp_aux_xfer(struct intel_dp *inte
        trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
  
        if (try == 3) {
 -              static u32 last_status = -1;
                const u32 status = intel_uncore_read(uncore, ch_ctl);
  
 -              if (status != last_status) {
 +              if (status != intel_dp->aux_busy_last_status) {
                        WARN(1, "dp_aux_ch not started status 0x%08x\n",
                             status);
 -                      last_status = status;
 +                      intel_dp->aux_busy_last_status = status;
                }
  
                ret = -EBUSY;
@@@ -1842,14 -1739,8 +1842,14 @@@ static bool intel_dp_source_supports_fe
  {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  
 -      return INTEL_GEN(dev_priv) >= 11 &&
 -              pipe_config->cpu_transcoder != TRANSCODER_A;
 +      /* On TGL, FEC is supported on all Pipes */
 +      if (INTEL_GEN(dev_priv) >= 12)
 +              return true;
 +
 +      if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
 +              return true;
 +
 +      return false;
  }
  
  static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
@@@ -1864,15 -1755,8 +1864,15 @@@ static bool intel_dp_source_supports_ds
  {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  
 -      return INTEL_GEN(dev_priv) >= 10 &&
 -              pipe_config->cpu_transcoder != TRANSCODER_A;
 +      /* On TGL, DSC is supported on all Pipes */
 +      if (INTEL_GEN(dev_priv) >= 12)
 +              return true;
 +
 +      if (INTEL_GEN(dev_priv) >= 10 &&
 +          pipe_config->cpu_transcoder != TRANSCODER_A)
 +              return true;
 +
 +      return false;
  }
  
  static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
@@@ -2031,17 -1915,11 +2031,17 @@@ static int intel_dp_dsc_compute_config(
        if (!intel_dp_supports_dsc(intel_dp, pipe_config))
                return -EINVAL;
  
 -      dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
 -                          conn_state->max_requested_bpc);
 +      /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
 +      if (INTEL_GEN(dev_priv) >= 12)
 +              dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
 +      else
 +              dsc_max_bpc = min_t(u8, 10,
 +                                  conn_state->max_requested_bpc);
  
        pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
 -      if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
 +
 +      /* Min Input BPC for ICL+ is 8 */
 +      if (pipe_bpp < 8 * 3) {
                DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
                return -EINVAL;
        }
                u8 dsc_dp_slice_count;
  
                dsc_max_output_bpp =
 -                      intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
 +                      intel_dp_dsc_get_output_bpp(dev_priv,
 +                                                  pipe_config->port_clock,
                                                    pipe_config->lane_count,
                                                    adjusted_mode->crtc_clock,
                                                    adjusted_mode->crtc_hdisplay);
@@@ -2250,16 -2127,6 +2250,16 @@@ bool intel_dp_limited_color_range(cons
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
  
 +      /*
 +       * Our YCbCr output is always limited range.
 +       * crtc_state->limited_color_range only applies to RGB,
 +       * and it must never be set for YCbCr or we risk setting
 +       * some conflicting bits in PIPECONF which will mess up
 +       * the colors on the monitor.
 +       */
 +      if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
 +              return false;
 +
        if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
                /*
                 * See:
@@@ -2359,7 -2226,7 +2359,7 @@@ intel_dp_compute_config(struct intel_en
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
                               &pipe_config->dp_m_n,
 -                             constant_n);
 +                             constant_n, pipe_config->fec_enable);
  
        if (intel_connector->panel.downclock_mode != NULL &&
                dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
                                               intel_connector->panel.downclock_mode->clock,
                                               pipe_config->port_clock,
                                               &pipe_config->dp_m2_n2,
 -                                             constant_n);
 +                                             constant_n, pipe_config->fec_enable);
        }
  
        if (!HAS_DDI(dev_priv))
  
        intel_psr_compute_config(intel_dp, pipe_config);
  
 +      intel_hdcp_transcoder_config(intel_connector,
 +                                   pipe_config->cpu_transcoder);
 +
        return 0;
  }
  
@@@ -2407,9 -2271,6 +2407,9 @@@ static void intel_dp_prepare(struct int
                                 intel_crtc_has_type(pipe_config,
                                                     INTEL_OUTPUT_DP_MST));
  
 +      intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
 +      intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
 +
        /*
         * There are four kinds of DP registers:
         *
@@@ -2611,9 -2472,8 +2611,9 @@@ static bool edp_panel_vdd_on(struct int
        intel_display_power_get(dev_priv,
                                intel_aux_power_domain(intel_dig_port));
  
 -      DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
 -                    port_name(intel_dig_port->base.port));
 +      DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
 +                    intel_dig_port->base.base.base.id,
 +                    intel_dig_port->base.base.name);
  
        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
 -              DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
 -                            port_name(intel_dig_port->base.port));
 +              DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
 +                            intel_dig_port->base.base.base.id,
 +                            intel_dig_port->base.base.name);
                msleep(intel_dp->panel_power_up_delay);
        }
  
@@@ -2659,9 -2518,8 +2659,9 @@@ void intel_edp_panel_vdd_on(struct inte
        vdd = false;
        with_pps_lock(intel_dp, wakeref)
                vdd = edp_panel_vdd_on(intel_dp);
 -      I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
 -           port_name(dp_to_dig_port(intel_dp)->base.port));
 +      I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
 +                      dp_to_dig_port(intel_dp)->base.base.base.id,
 +                      dp_to_dig_port(intel_dp)->base.base.name);
  }
  
  static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
        if (!edp_have_panel_vdd(intel_dp))
                return;
  
 -      DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
 -                    port_name(intel_dig_port->base.port));
 +      DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
 +                    intel_dig_port->base.base.base.id,
 +                    intel_dig_port->base.base.name);
  
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;
@@@ -2743,9 -2600,8 +2743,9 @@@ static void edp_panel_vdd_off(struct in
        if (!intel_dp_is_edp(intel_dp))
                return;
  
 -      I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
 -           port_name(dp_to_dig_port(intel_dp)->base.port));
 +      I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
 +                      dp_to_dig_port(intel_dp)->base.base.base.id,
 +                      dp_to_dig_port(intel_dp)->base.base.name);
  
        intel_dp->want_panel_vdd = false;
  
@@@ -2766,14 -2622,12 +2766,14 @@@ static void edp_panel_on(struct intel_d
        if (!intel_dp_is_edp(intel_dp))
                return;
  
 -      DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
 -                    port_name(dp_to_dig_port(intel_dp)->base.port));
 +      DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
 +                    dp_to_dig_port(intel_dp)->base.base.base.id,
 +                    dp_to_dig_port(intel_dp)->base.base.name);
  
        if (WARN(edp_have_panel_power(intel_dp),
 -               "eDP port %c panel power already on\n",
 -               port_name(dp_to_dig_port(intel_dp)->base.port)))
 +               "[ENCODER:%d:%s] panel power already on\n",
 +               dp_to_dig_port(intel_dp)->base.base.base.id,
 +               dp_to_dig_port(intel_dp)->base.base.name))
                return;
  
        wait_panel_power_cycle(intel_dp);
@@@ -2828,11 -2682,11 +2828,11 @@@ static void edp_panel_off(struct intel_
        if (!intel_dp_is_edp(intel_dp))
                return;
  
 -      DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
 -                    port_name(dig_port->base.port));
 +      DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
 +                    dig_port->base.base.base.id, dig_port->base.base.name);
  
 -      WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
 -           port_name(dig_port->base.port));
 +      WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
 +           dig_port->base.base.base.id, dig_port->base.base.name);
  
        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
@@@ -2977,8 -2831,8 +2977,8 @@@ static void assert_dp_port(struct intel
        bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
  
        I915_STATE_WARN(cur_state != state,
 -                      "DP port %c state assertion failure (expected %s, current %s)\n",
 -                      port_name(dig_port->base.port),
 +                      "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
 +                      dig_port->base.base.base.id, dig_port->base.base.name,
                        onoff(state), onoff(cur_state));
  }
  #define assert_dp_port_disabled(d) assert_dp_port((d), false)
@@@ -3366,7 -3220,7 +3366,7 @@@ _intel_dp_set_link_train(struct intel_d
                              dp_train_pat & train_pat_mask);
  
        if (HAS_DDI(dev_priv)) {
 -              u32 temp = I915_READ(DP_TP_CTL(port));
 +              u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
  
                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
                        break;
                }
 -              I915_WRITE(DP_TP_CTL(port), temp);
 +              I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
  
        } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@@ -3556,9 -3410,8 +3556,9 @@@ static void vlv_detach_power_sequencer(
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
 -      DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
 -                    pipe_name(pipe), port_name(intel_dig_port->base.port));
 +      DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
 +                    pipe_name(pipe), intel_dig_port->base.base.base.id,
 +                    intel_dig_port->base.base.name);
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);
  
@@@ -3574,18 -3427,17 +3574,18 @@@ static void vlv_steal_power_sequencer(s
  
        for_each_intel_dp(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 -              enum port port = encoder->port;
  
                WARN(intel_dp->active_pipe == pipe,
 -                   "stealing pipe %c power sequencer from active (e)DP port %c\n",
 -                   pipe_name(pipe), port_name(port));
 +                   "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
 +                   pipe_name(pipe), encoder->base.base.id,
 +                   encoder->base.name);
  
                if (intel_dp->pps_pipe != pipe)
                        continue;
  
 -              DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
 -                            pipe_name(pipe), port_name(port));
 +              DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
 +                            pipe_name(pipe), encoder->base.base.id,
 +                            encoder->base.name);
  
                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
@@@ -3627,9 -3479,8 +3627,9 @@@ static void vlv_init_panel_power_sequen
        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;
  
 -      DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
 -                    pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
 +      DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
 +                    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
 +                    encoder->base.name);
  
        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(intel_dp);
@@@ -4093,22 -3944,22 +4093,22 @@@ void intel_dp_set_idle_link_train(struc
        if (!HAS_DDI(dev_priv))
                return;
  
 -      val = I915_READ(DP_TP_CTL(port));
 +      val = I915_READ(intel_dp->regs.dp_tp_ctl);
        val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
        val |= DP_TP_CTL_LINK_TRAIN_IDLE;
 -      I915_WRITE(DP_TP_CTL(port), val);
 +      I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
  
        /*
 -       * On PORT_A we can have only eDP in SST mode. There the only reason
 -       * we need to set idle transmission mode is to work around a HW issue
 -       * where we enable the pipe while not in idle link-training mode.
 +       * Until TGL on PORT_A we can have only eDP in SST mode. There the only
 +       * reason we need to set idle transmission mode is to work around a HW
 +       * issue where we enable the pipe while not in idle link-training mode.
         * In this case there is requirement to wait for a minimum number of
         * idle patterns to be sent.
         */
 -      if (port == PORT_A)
 +      if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
                return;
  
 -      if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
 +      if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
                                  DP_TP_STATUS_IDLE_DONE, 1))
                DRM_ERROR("Timed out waiting for DP idle patterns\n");
  }
@@@ -4450,10 -4301,9 +4450,10 @@@ intel_dp_configure_mst(struct intel_dp 
                &dp_to_dig_port(intel_dp)->base;
        bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
  
 -      DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
 -                    port_name(encoder->port), yesno(intel_dp->can_mst),
 -                    yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
 +      DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support? port: %s, sink: %s, modparam: %s\n",
 +                    encoder->base.base.id, encoder->base.name,
 +                    yesno(intel_dp->can_mst), yesno(sink_can_mst),
 +                    yesno(i915_modparams.enable_dp_mst));
  
        if (!intel_dp->can_mst)
                return;
@@@ -4473,6 -4323,91 +4473,6 @@@ intel_dp_get_sink_irq_esi(struct intel_
                DP_DPRX_ESI_LEN;
  }
  
 -u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
 -                              int mode_clock, int mode_hdisplay)
 -{
 -      u16 bits_per_pixel, max_bpp_small_joiner_ram;
 -      int i;
 -
 -      /*
 -       * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
 -       * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
 -       * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
 -       * for MST -> TimeSlotsPerMTP has to be calculated
 -       */
 -      bits_per_pixel = (link_clock * lane_count * 8 *
 -                        DP_DSC_FEC_OVERHEAD_FACTOR) /
 -              mode_clock;
 -
 -      /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
 -      max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
 -              mode_hdisplay;
 -
 -      /*
 -       * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
 -       * check, output bpp from small joiner RAM check)
 -       */
 -      bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
 -
 -      /* Error out if the max bpp is less than smallest allowed valid bpp */
 -      if (bits_per_pixel < valid_dsc_bpp[0]) {
 -              DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
 -              return 0;
 -      }
 -
 -      /* Find the nearest match in the array of known BPPs from VESA */
 -      for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
 -              if (bits_per_pixel < valid_dsc_bpp[i + 1])
 -                      break;
 -      }
 -      bits_per_pixel = valid_dsc_bpp[i];
 -
 -      /*
 -       * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
 -       * fractional part is 0
 -       */
 -      return bits_per_pixel << 4;
 -}
 -
 -u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
 -                              int mode_clock,
 -                              int mode_hdisplay)
 -{
 -      u8 min_slice_count, i;
 -      int max_slice_width;
 -
 -      if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
 -              min_slice_count = DIV_ROUND_UP(mode_clock,
 -                                             DP_DSC_MAX_ENC_THROUGHPUT_0);
 -      else
 -              min_slice_count = DIV_ROUND_UP(mode_clock,
 -                                             DP_DSC_MAX_ENC_THROUGHPUT_1);
 -
 -      max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
 -      if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
 -              DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
 -                            max_slice_width);
 -              return 0;
 -      }
 -      /* Also take into account max slice width */
 -      min_slice_count = min_t(u8, min_slice_count,
 -                              DIV_ROUND_UP(mode_hdisplay,
 -                                           max_slice_width));
 -
 -      /* Find the closest match to the valid slice count values */
 -      for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
 -              if (valid_dsc_slicecount[i] >
 -                  drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
 -                                                  false))
 -                      break;
 -              if (min_slice_count  <= valid_dsc_slicecount[i])
 -                      return valid_dsc_slicecount[i];
 -      }
 -
 -      DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
 -      return 0;
 -}
 -
  static void
  intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
                               const struct intel_crtc_state *crtc_state)
@@@ -5555,7 -5490,6 +5555,6 @@@ static in
  intel_dp_connector_register(struct drm_connector *connector)
  {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
-       struct drm_device *dev = connector->dev;
        int ret;
  
        ret = intel_connector_register(connector);
        intel_dp->aux.dev = connector->kdev;
        ret = drm_dp_aux_register(&intel_dp->aux);
        if (!ret)
-               drm_dp_cec_register_connector(&intel_dp->aux,
-                                             connector->name, dev->dev);
+               drm_dp_cec_register_connector(&intel_dp->aux, connector);
        return ret;
  }
  
@@@ -6329,15 -6262,13 +6327,15 @@@ intel_dp_hpd_pulse(struct intel_digital
                 * would end up in an endless cycle of
                 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
                 */
 -              DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
 -                            port_name(intel_dig_port->base.port));
 +              DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
 +                            intel_dig_port->base.base.base.id,
 +                            intel_dig_port->base.base.name);
                return IRQ_HANDLED;
        }
  
 -      DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
 -                    port_name(intel_dig_port->base.port),
 +      DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
 +                    intel_dig_port->base.base.base.id,
 +                    intel_dig_port->base.base.name,
                      long_hpd ? "long" : "short");
  
        if (long_hpd) {
@@@ -7201,9 -7132,8 +7199,9 @@@ intel_dp_init_connector(struct intel_di
                  intel_dp_modeset_retry_work_fn);
  
        if (WARN(intel_dig_port->max_lanes < 1,
 -               "Not enough lanes (%d) for DP on port %c\n",
 -               intel_dig_port->max_lanes, port_name(port)))
 +               "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
 +               intel_dig_port->max_lanes, intel_encoder->base.base.id,
 +               intel_encoder->base.name))
                return false;
  
        intel_dp_set_source_rates(intel_dp);
                    port != PORT_B && port != PORT_C))
                return false;
  
 -      DRM_DEBUG_KMS("Adding %s connector on port %c\n",
 -                      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
 -                      port_name(port));
 +      DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
 +                    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
 +                    intel_encoder->base.base.id, intel_encoder->base.name);
  
        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@@ -7365,11 -7295,11 +7363,11 @@@ bool intel_dp_init(struct drm_i915_priv
        intel_encoder->power_domain = intel_port_to_power_domain(port);
        if (IS_CHERRYVIEW(dev_priv)) {
                if (port == PORT_D)
 -                      intel_encoder->crtc_mask = 1 << 2;
 +                      intel_encoder->crtc_mask = BIT(PIPE_C);
                else
 -                      intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 +                      intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B);
        } else {
 -              intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 +              intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
        }
        intel_encoder->cloneable = 0;
        intel_encoder->port = port;
index 03b140980fc45b6378466e2779f02592fa492f57,0f5a0c618e461595a6d008c7e207b04ad18b363d..0a6846c5ba954e760c7a7e7d32ab3bd7b75319aa
@@@ -724,20 -724,11 +724,20 @@@ intel_hdmi_compute_avi_infoframe(struc
  
        drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
  
 -      drm_hdmi_avi_infoframe_quant_range(frame, connector,
 -                                         adjusted_mode,
 -                                         crtc_state->limited_color_range ?
 -                                         HDMI_QUANTIZATION_RANGE_LIMITED :
 -                                         HDMI_QUANTIZATION_RANGE_FULL);
 +      /* nonsense combination */
 +      WARN_ON(crtc_state->limited_color_range &&
 +              crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
 +
 +      if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
 +              drm_hdmi_avi_infoframe_quant_range(frame, connector,
 +                                                 adjusted_mode,
 +                                                 crtc_state->limited_color_range ?
 +                                                 HDMI_QUANTIZATION_RANGE_LIMITED :
 +                                                 HDMI_QUANTIZATION_RANGE_FULL);
 +      } else {
 +              frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
 +              frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
 +      }
  
        drm_hdmi_avi_infoframe_content_type(frame, conn_state);
  
@@@ -1500,10 -1491,7 +1500,10 @@@ bool intel_hdmi_hdcp_check_link(struct 
  {
        struct drm_i915_private *dev_priv =
                intel_dig_port->base.base.dev->dev_private;
 +      struct intel_connector *connector =
 +              intel_dig_port->hdmi.attached_connector;
        enum port port = intel_dig_port->base.port;
 +      enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
        int ret;
        union {
                u32 reg;
        if (ret)
                return false;
  
 -      I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
 +      I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
  
        /* Wait for Ri prime match */
 -      if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
 +      if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
                     (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
                DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
 -                        I915_READ(PORT_HDCP_STATUS(port)));
 +                        I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
 +                                              port)));
                return false;
        }
        return true;
@@@ -2197,10 -2184,8 +2197,10 @@@ intel_hdmi_mode_valid(struct drm_connec
                        status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
                                                       true, force_dvi);
        }
 +      if (status != MODE_OK)
 +              return status;
  
 -      return status;
 +      return intel_mode_valid_max_plane_size(dev_priv, mode);
  }
  
  static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
  
  static bool
  intel_hdmi_ycbcr420_config(struct drm_connector *connector,
 -                         struct intel_crtc_state *config,
 -                         int *clock_12bpc, int *clock_10bpc,
 -                         int *clock_8bpc)
 +                         struct intel_crtc_state *config)
  {
        struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
  
                return false;
        }
  
 -      /* YCBCR420 TMDS rate requirement is half the pixel clock */
 -      config->port_clock /= 2;
 -      *clock_12bpc /= 2;
 -      *clock_10bpc /= 2;
 -      *clock_8bpc /= 2;
        config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
  
        /* YCBCR 420 output conversion needs a scaler */
        return true;
  }
  
 +static int intel_hdmi_port_clock(int clock, int bpc)
 +{
 +      /*
 +       * Need to adjust the port link by:
 +       *  1.5x for 12bpc
 +       *  1.25x for 10bpc
 +       */
 +      return clock * bpc / 8;
 +}
 +
 +static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
 +                                struct intel_crtc_state *crtc_state,
 +                                int clock, bool force_dvi)
 +{
 +      struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 +      int bpc;
 +
 +      for (bpc = 12; bpc >= 10; bpc -= 2) {
 +              if (hdmi_deep_color_possible(crtc_state, bpc) &&
 +                  hdmi_port_clock_valid(intel_hdmi,
 +                                        intel_hdmi_port_clock(clock, bpc),
 +                                        true, force_dvi) == MODE_OK)
 +                      return bpc;
 +      }
 +
 +      return 8;
 +}
 +
 +static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
 +                                  struct intel_crtc_state *crtc_state,
 +                                  bool force_dvi)
 +{
 +      struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 +      const struct drm_display_mode *adjusted_mode =
 +              &crtc_state->base.adjusted_mode;
 +      int bpc, clock = adjusted_mode->crtc_clock;
 +
 +      if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
 +              clock *= 2;
 +
 +      /* YCBCR420 TMDS rate requirement is half the pixel clock */
 +      if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
 +              clock /= 2;
 +
 +      bpc = intel_hdmi_compute_bpc(encoder, crtc_state,
 +                                   clock, force_dvi);
 +
 +      crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
 +
 +      /*
 +       * pipe_bpp could already be below 8bpc due to
 +       * FDI bandwidth constraints. We shouldn't bump it
 +       * back up to 8bpc in that case.
 +       */
 +      if (crtc_state->pipe_bpp > bpc * 3)
 +              crtc_state->pipe_bpp = bpc * 3;
 +
 +      DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
 +                    bpc, crtc_state->pipe_bpp);
 +
 +      if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
 +                                false, force_dvi) != MODE_OK) {
 +              DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
 +                            crtc_state->port_clock);
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
 +
 +static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
 +                                         const struct drm_connector_state *conn_state)
 +{
 +      const struct intel_digital_connector_state *intel_conn_state =
 +              to_intel_digital_connector_state(conn_state);
 +      const struct drm_display_mode *adjusted_mode =
 +              &crtc_state->base.adjusted_mode;
 +
 +      /*
 +       * Our YCbCr output is always limited range.
 +       * crtc_state->limited_color_range only applies to RGB,
 +       * and it must never be set for YCbCr or we risk setting
 +       * some conflicting bits in PIPECONF which will mess up
 +       * the colors on the monitor.
 +       */
 +      if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
 +              return false;
 +
 +      if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
 +              /* See CEA-861-E - 5.1 Default Encoding Parameters */
 +              return crtc_state->has_hdmi_sink &&
 +                      drm_default_rgb_quant_range(adjusted_mode) ==
 +                      HDMI_QUANTIZATION_RANGE_LIMITED;
 +      } else {
 +              return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
 +      }
 +}
 +
  int intel_hdmi_compute_config(struct intel_encoder *encoder,
                              struct intel_crtc_state *pipe_config,
                              struct drm_connector_state *conn_state)
        struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
        struct intel_digital_connector_state *intel_conn_state =
                to_intel_digital_connector_state(conn_state);
 -      int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
 -      int clock_10bpc = clock_8bpc * 5 / 4;
 -      int clock_12bpc = clock_8bpc * 3 / 2;
 -      int desired_bpp;
        bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
 +      int ret;
  
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return -EINVAL;
        if (pipe_config->has_hdmi_sink)
                pipe_config->has_infoframe = true;
  
 -      if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
 -              /* See CEA-861-E - 5.1 Default Encoding Parameters */
 -              pipe_config->limited_color_range =
 -                      pipe_config->has_hdmi_sink &&
 -                      drm_default_rgb_quant_range(adjusted_mode) ==
 -                      HDMI_QUANTIZATION_RANGE_LIMITED;
 -      } else {
 -              pipe_config->limited_color_range =
 -                      intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
 -      }
 -
 -      if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
 +      if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                pipe_config->pixel_multiplier = 2;
 -              clock_8bpc *= 2;
 -              clock_10bpc *= 2;
 -              clock_12bpc *= 2;
 -      }
  
        if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
 -              if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
 -                                              &clock_12bpc, &clock_10bpc,
 -                                              &clock_8bpc)) {
 +              if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
                        DRM_ERROR("Can't support YCBCR420 output\n");
                        return -EINVAL;
                }
        }
  
 +      pipe_config->limited_color_range =
 +              intel_hdmi_limited_color_range(pipe_config, conn_state);
 +
        if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
                pipe_config->has_pch_encoder = true;
  
                                intel_conn_state->force_audio == HDMI_AUDIO_ON;
        }
  
 -      /*
 -       * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
 -       * to check that the higher clock still fits within limits.
 -       */
 -      if (hdmi_deep_color_possible(pipe_config, 12) &&
 -          hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
 -                                true, force_dvi) == MODE_OK) {
 -              DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
 -              desired_bpp = 12*3;
 -
 -              /* Need to adjust the port link by 1.5x for 12bpc. */
 -              pipe_config->port_clock = clock_12bpc;
 -      } else if (hdmi_deep_color_possible(pipe_config, 10) &&
 -                 hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
 -                                       true, force_dvi) == MODE_OK) {
 -              DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
 -              desired_bpp = 10 * 3;
 -
 -              /* Need to adjust the port link by 1.25x for 10bpc. */
 -              pipe_config->port_clock = clock_10bpc;
 -      } else {
 -              DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
 -              desired_bpp = 8*3;
 -
 -              pipe_config->port_clock = clock_8bpc;
 -      }
 -
 -      if (!pipe_config->bw_constrained) {
 -              DRM_DEBUG_KMS("forcing pipe bpp to %i for HDMI\n", desired_bpp);
 -              pipe_config->pipe_bpp = desired_bpp;
 -      }
 -
 -      if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
 -                                false, force_dvi) != MODE_OK) {
 -              DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
 -              return -EINVAL;
 -      }
 +      ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi);
 +      if (ret)
 +              return ret;
  
        /* Set user selected PAR to incoming mode's member */
        adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
                return -EINVAL;
        }
  
 +      intel_hdcp_transcoder_config(intel_hdmi->attached_connector,
 +                                   pipe_config->cpu_transcoder);
 +
        return 0;
  }
  
@@@ -2809,8 -2751,9 +2809,9 @@@ intel_hdmi_connector_register(struct dr
  
  static void intel_hdmi_destroy(struct drm_connector *connector)
  {
-       if (intel_attached_hdmi(connector)->cec_notifier)
-               cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
+       struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
+       cec_notifier_conn_unregister(n);
  
        intel_connector_destroy(connector);
  }
@@@ -3059,7 -3002,7 +3060,7 @@@ static u8 intel_hdmi_ddc_pin(struct drm
  
        if (HAS_PCH_MCC(dev_priv))
                ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
 -      else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv))
 +      else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
        else if (HAS_PCH_CNP(dev_priv))
                ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
@@@ -3125,14 -3068,14 +3126,15 @@@ void intel_hdmi_init_connector(struct i
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_encoder->port;
+       struct cec_connector_info conn_info;
  
 -      DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
 -                    port_name(port));
 +      DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
 +                    intel_encoder->base.base.id, intel_encoder->base.name);
  
        if (WARN(intel_dig_port->max_lanes < 4,
 -               "Not enough lanes (%d) for HDMI on port %c\n",
 -               intel_dig_port->max_lanes, port_name(port)))
 +               "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
 +               intel_dig_port->max_lanes, intel_encoder->base.base.id,
 +               intel_encoder->base.name))
                return;
  
        drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
  
-       intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev,
-                                                        port_identifier(port));
+       cec_fill_conn_info_from_drm(&conn_info, connector);
+       intel_hdmi->cec_notifier =
+               cec_notifier_conn_register(dev->dev, port_identifier(port),
+                                          &conn_info);
        if (!intel_hdmi->cec_notifier)
                DRM_DEBUG_KMS("CEC notifier get failed\n");
  }
@@@ -3269,11 -3215,11 +3274,11 @@@ void intel_hdmi_init(struct drm_i915_pr
        intel_encoder->port = port;
        if (IS_CHERRYVIEW(dev_priv)) {
                if (port == PORT_D)
 -                      intel_encoder->crtc_mask = 1 << 2;
 +                      intel_encoder->crtc_mask = BIT(PIPE_C);
                else
 -                      intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 +                      intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B);
        } else {
 -              intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 +              intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
        }
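
The crtc_mask assignments just above read more clearly once the pipes are named. BIT() from <linux/bits.h> expands to (1UL << n), so the new masks are numerically identical to the old literal shifts (PIPE_A is 0, as the old (1 << 0) shows); a small illustrative sketch:

    #include <linux/bits.h>

    enum demo_pipe { DEMO_PIPE_A = 0, DEMO_PIPE_B, DEMO_PIPE_C };  /* illustrative */

    #define DEMO_MASK_PIPE_C   BIT(DEMO_PIPE_C)                        /* 0x4 */
    #define DEMO_MASK_PIPE_AB  (BIT(DEMO_PIPE_A) | BIT(DEMO_PIPE_B))   /* 0x3 */
    #define DEMO_MASK_ALL      (BIT(DEMO_PIPE_A) | BIT(DEMO_PIPE_B) | \
                                BIT(DEMO_PIPE_C))                      /* 0x7 */
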
        intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
        /*
index e8ddc2320efab3b891b8eb3504371cb8905ade1a,493f07806b08761afde8203e1a3f7eca9d02067e..98816c35ffc39b581b7cea08f22560cdf03d414f
@@@ -252,7 -252,6 +252,7 @@@ struct i915_execbuffer 
                bool has_fence : 1;
                bool needs_unfenced : 1;
  
 +              struct intel_context *ce;
                struct i915_request *rq;
                u32 *rq_cmd;
                unsigned int rq_size;
@@@ -698,9 -697,7 +698,9 @@@ static int eb_reserve(struct i915_execb
  
                case 1:
                        /* Too fragmented, unbind everything and retry */
 +                      mutex_lock(&eb->context->vm->mutex);
                        err = i915_gem_evict_vm(eb->context->vm);
 +                      mutex_unlock(&eb->context->vm->mutex);
                        if (err)
                                return err;
                        break;
@@@ -728,7 -725,7 +728,7 @@@ static int eb_select_context(struct i91
                return -ENOENT;
  
        eb->gem_context = ctx;
 -      if (ctx->vm)
 +      if (rcu_access_pointer(ctx->vm))
                eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
  
        eb->context_flags = 0;
@@@ -883,9 -880,6 +883,9 @@@ static void eb_destroy(const struct i91
  {
        GEM_BUG_ON(eb->reloc_cache.rq);
  
 +      if (eb->reloc_cache.ce)
 +              intel_context_put(eb->reloc_cache.ce);
 +
        if (eb->lut_size > 0)
                kfree(eb->buckets);
  }
@@@ -908,8 -902,7 +908,8 @@@ static void reloc_cache_init(struct rel
        cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
        cache->has_fence = cache->gen < 4;
        cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
-       cache->node.allocated = false;
+       cache->node.flags = 0;
 +      cache->ce = NULL;
        cache->rq = NULL;
        cache->rq_size = 0;
  }
@@@ -974,9 -967,7 +974,9 @@@ static void reloc_cache_reset(struct re
                        ggtt->vm.clear_range(&ggtt->vm,
                                             cache->node.start,
                                             cache->node.size);
 +                      mutex_lock(&ggtt->vm.mutex);
                        drm_mm_remove_node(&cache->node);
 +                      mutex_unlock(&ggtt->vm.mutex);
                } else {
                        i915_vma_unpin((struct i915_vma *)cache->node.mm);
                }
@@@ -1051,13 -1042,11 +1051,13 @@@ static void *reloc_iomap(struct drm_i91
                                               PIN_NOEVICT);
                if (IS_ERR(vma)) {
                        memset(&cache->node, 0, sizeof(cache->node));
 +                      mutex_lock(&ggtt->vm.mutex);
                        err = drm_mm_insert_node_in_range
                                (&ggtt->vm.mm, &cache->node,
                                 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
                                 0, ggtt->mappable_end,
                                 DRM_MM_INSERT_LOW);
 +                      mutex_unlock(&ggtt->vm.mutex);
                        if (err) /* no inactive aperture space, use cpu reloc */
                                return NULL;
                } else {
@@@ -1156,7 -1145,7 +1156,7 @@@ static int __reloc_gpu_alloc(struct i91
        u32 *cmd;
        int err;
  
 -      pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
 +      pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
        if (IS_ERR(pool))
                return PTR_ERR(pool);
  
        if (err)
                goto err_unmap;
  
 -      rq = i915_request_create(eb->context);
 +      rq = intel_context_create_request(cache->ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
@@@ -1250,29 -1239,6 +1250,29 @@@ static u32 *reloc_gpu(struct i915_execb
                if (!intel_engine_can_store_dword(eb->engine))
                        return ERR_PTR(-ENODEV);
  
 +              if (!cache->ce) {
 +                      struct intel_context *ce;
 +
 +                      /*
 +                       * The CS pre-parser can pre-fetch commands across
 +                       * memory sync points and starting gen12 it is able to
 +                       * pre-fetch across BB_START and BB_END boundaries
 +                       * (within the same context). We therefore use a
 +                       * separate context gen12+ to guarantee that the reloc
 +                       * writes land before the parser gets to the target
 +                       * memory location.
 +                       */
 +                      if (cache->gen >= 12)
 +                              ce = intel_context_create(eb->context->gem_context,
 +                                                        eb->engine);
 +                      else
 +                              ce = intel_context_get(eb->context);
 +                      if (IS_ERR(ce))
 +                              return ERR_CAST(ce);
 +
 +                      cache->ce = ce;
 +              }
 +
                err = __reloc_gpu_alloc(eb, vma, len);
                if (unlikely(err))
                        return ERR_PTR(err);
@@@ -1422,7 -1388,7 +1422,7 @@@ eb_relocate_entry(struct i915_execbuffe
                if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
                    IS_GEN(eb->i915, 6)) {
                        err = i915_vma_bind(target, target->obj->cache_level,
 -                                          PIN_GLOBAL);
 +                                          PIN_GLOBAL, NULL);
                        if (WARN_ONCE(err,
                                      "Unexpected failure to bind target VMA!"))
                                return err;
@@@ -1995,7 -1961,7 +1995,7 @@@ static struct i915_vma *eb_parse(struc
        struct i915_vma *vma;
        int err;
  
 -      pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
 +      pool = intel_engine_get_pool(eb->engine, eb->batch_len);
        if (IS_ERR(pool))
                return ERR_CAST(pool);
  
@@@ -2146,6 -2112,35 +2146,6 @@@ static struct i915_request *eb_throttle
        return i915_request_get(rq);
  }
  
 -static int
 -__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 -{
 -      int err;
 -
 -      if (likely(atomic_inc_not_zero(&ce->pin_count)))
 -              return 0;
 -
 -      err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
 -      if (err)
 -              return err;
 -
 -      err = __intel_context_do_pin(ce);
 -      mutex_unlock(&eb->i915->drm.struct_mutex);
 -
 -      return err;
 -}
 -
 -static void
 -__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 -{
 -      if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
 -              return;
 -
 -      mutex_lock(&eb->i915->drm.struct_mutex);
 -      intel_context_unpin(ce);
 -      mutex_unlock(&eb->i915->drm.struct_mutex);
 -}
 -
  static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
  {
        struct intel_timeline *tl;
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
 -      err = __eb_pin_context(eb, ce);
 +      err = intel_context_pin(ce);
        if (err)
                return err;
  
@@@ -2209,7 -2204,7 +2209,7 @@@ err_exit
        intel_context_exit(ce);
        intel_context_timeline_unlock(tl);
  err_unpin:
 -      __eb_unpin_context(eb, ce);
 +      intel_context_unpin(ce);
        return err;
  }
  
@@@ -2222,7 -2217,7 +2222,7 @@@ static void eb_unpin_engine(struct i915
        intel_context_exit(ce);
        mutex_unlock(&tl->mutex);
  
 -      __eb_unpin_context(eb, ce);
 +      intel_context_unpin(ce);
  }
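
The removed __eb_pin_context()/__eb_unpin_context() helpers above were a local copy of the usual atomic fast-path pinning idiom that intel_context_pin()/intel_context_unpin() now provide. A generic sketch of that idiom, with hypothetical names and a plain mutex standing in for whatever lock the slow path really needs:

    #include <linux/atomic.h>
    #include <linux/mutex.h>

    struct pinnable {
            atomic_t pin_count;
            struct mutex lock;
    };

    int do_first_pin(struct pinnable *p);       /* slow paths assumed elsewhere */
    void do_last_unpin(struct pinnable *p);

    static int demo_pin(struct pinnable *p)
    {
            int err = 0;

            /* Fast path: already pinned, just bump the count. */
            if (atomic_inc_not_zero(&p->pin_count))
                    return 0;

            mutex_lock(&p->lock);
            if (!atomic_read(&p->pin_count))    /* recheck under the lock */
                    err = do_first_pin(p);
            if (!err)
                    atomic_inc(&p->pin_count);
            mutex_unlock(&p->lock);

            return err;
    }

    static void demo_unpin(struct pinnable *p)
    {
            /* Fast path: not the last pin, just drop the count. */
            if (atomic_add_unless(&p->pin_count, -1, 1))
                    return;

            mutex_lock(&p->lock);
            if (atomic_dec_and_test(&p->pin_count))
                    do_last_unpin(p);
            mutex_unlock(&p->lock);
    }
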
  
  static unsigned int
index 15abad5c2d62aea8057b885dc8419b007d61432e,1c4ff8b5b0a2d359c94f59d26ac52b7845052e4b..9354924576c4cb794ca6a81666fb831c73c791f6
@@@ -36,6 -36,7 +36,6 @@@
  #include <linux/pm_runtime.h>
  #include <linux/pnp.h>
  #include <linux/slab.h>
 -#include <linux/vgaarb.h>
  #include <linux/vga_switcheroo.h>
  #include <linux/vt.h>
  #include <acpi/video.h>
  #include "display/intel_display_types.h"
  #include "display/intel_dp.h"
  #include "display/intel_fbdev.h"
 -#include "display/intel_gmbus.h"
  #include "display/intel_hotplug.h"
  #include "display/intel_overlay.h"
  #include "display/intel_pipe_crc.h"
  #include "display/intel_sprite.h"
 +#include "display/intel_vga.h"
  
  #include "gem/i915_gem_context.h"
  #include "gem/i915_gem_ioctls.h"
@@@ -71,7 -72,6 +71,7 @@@
  #include "i915_perf.h"
  #include "i915_query.h"
  #include "i915_suspend.h"
 +#include "i915_switcheroo.h"
  #include "i915_sysfs.h"
  #include "i915_trace.h"
  #include "i915_vgpu.h"
@@@ -269,125 -269,159 +269,100 @@@ intel_teardown_mchbar(struct drm_i915_p
                release_resource(&dev_priv->mch_res);
  }
  
 -/* true = enable decode, false = disable decoder */
 -static unsigned int i915_vga_set_decode(void *cookie, bool state)
 +static int i915_driver_modeset_probe(struct drm_i915_private *i915)
  {
 -      struct drm_i915_private *dev_priv = cookie;
 -
 -      intel_modeset_vga_set_state(dev_priv, state);
 -      if (state)
 -              return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 -                     VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 -      else
 -              return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 -}
 -
 -static int i915_resume_switcheroo(struct drm_i915_private *i915);
 -static int i915_suspend_switcheroo(struct drm_i915_private *i915,
 -                                 pm_message_t state);
 -
 -static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 -{
 -      struct drm_i915_private *i915 = pdev_to_i915(pdev);
 -      pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 -
 -      if (!i915) {
 -              dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
 -              return;
 -      }
 -
 -      if (state == VGA_SWITCHEROO_ON) {
 -              pr_info("switched on\n");
 -              i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
 -              /* i915 resume handler doesn't set to D0 */
 -              pci_set_power_state(pdev, PCI_D0);
 -              i915_resume_switcheroo(i915);
 -              i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
 -      } else {
 -              pr_info("switched off\n");
 -              i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
 -              i915_suspend_switcheroo(i915, pmm);
 -              i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
 -      }
 -}
 -
 -static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
 -{
 -      struct drm_i915_private *i915 = pdev_to_i915(pdev);
 -
 -      /*
 -       * FIXME: open_count is protected by drm_global_mutex but that would lead to
 -       * locking inversion with the driver load path. And the access here is
 -       * completely racy anyway. So don't bother with locking for now.
 -       */
 -      return i915 && i915->drm.open_count == 0;
 -}
 -
 -static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 -      .set_gpu_state = i915_switcheroo_set_state,
 -      .reprobe = NULL,
 -      .can_switch = i915_switcheroo_can_switch,
 -};
 -
 -static int i915_driver_modeset_probe(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;
  
 -      if (i915_inject_probe_failure(dev_priv))
 +      if (i915_inject_probe_failure(i915))
                return -ENODEV;
  
 -      if (HAS_DISPLAY(dev_priv)) {
 -              ret = drm_vblank_init(&dev_priv->drm,
 -                                    INTEL_INFO(dev_priv)->num_pipes);
 +      if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
 +              ret = drm_vblank_init(&i915->drm,
 +                                    INTEL_NUM_PIPES(i915));
                if (ret)
                        goto out;
        }
  
 -      intel_bios_init(dev_priv);
 +      intel_bios_init(i915);
  
 -      /* If we have > 1 VGA cards, then we need to arbitrate access
 -       * to the common VGA resources.
 -       *
 -       * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
 -       * then we do not take part in VGA arbitration and the
 -       * vga_client_register() fails with -ENODEV.
 -       */
 -      ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
 -      if (ret && ret != -ENODEV)
 +      ret = intel_vga_register(i915);
 +      if (ret)
                goto out;
  
        intel_register_dsm_handler();
  
 -      ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
 +      ret = i915_switcheroo_register(i915);
        if (ret)
                goto cleanup_vga_client;
  
        /* must happen before intel_power_domains_init_hw() on VLV/CHV */
 -      intel_update_rawclk(dev_priv);
 +      intel_update_rawclk(i915);
  
 -      intel_power_domains_init_hw(dev_priv, false);
 +      intel_power_domains_init_hw(i915, false);
  
 -      intel_csr_ucode_init(dev_priv);
 +      intel_csr_ucode_init(i915);
  
 -      ret = intel_irq_install(dev_priv);
 +      ret = intel_irq_install(i915);
        if (ret)
                goto cleanup_csr;
  
 -      intel_gmbus_setup(dev_priv);
 -
        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
 -      ret = intel_modeset_init(dev);
 +      ret = intel_modeset_init(i915);
        if (ret)
                goto cleanup_irq;
  
 -      ret = i915_gem_init(dev_priv);
 +      ret = i915_gem_init(i915);
        if (ret)
                goto cleanup_modeset;
  
 -      intel_overlay_setup(dev_priv);
 +      intel_overlay_setup(i915);
  
 -      if (!HAS_DISPLAY(dev_priv))
 +      if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
                return 0;
  
 -      ret = intel_fbdev_init(dev);
 +      ret = intel_fbdev_init(&i915->drm);
        if (ret)
                goto cleanup_gem;
  
        /* Only enable hotplug handling once the fbdev is fully set up. */
 -      intel_hpd_init(dev_priv);
 +      intel_hpd_init(i915);
  
 -      intel_init_ipc(dev_priv);
 +      intel_init_ipc(i915);
  
        return 0;
  
  cleanup_gem:
 -      i915_gem_suspend(dev_priv);
 -      i915_gem_driver_remove(dev_priv);
 -      i915_gem_driver_release(dev_priv);
 +      i915_gem_suspend(i915);
 +      i915_gem_driver_remove(i915);
 +      i915_gem_driver_release(i915);
  cleanup_modeset:
 -      intel_modeset_driver_remove(dev);
 +      intel_modeset_driver_remove(i915);
  cleanup_irq:
 -      intel_irq_uninstall(dev_priv);
 -      intel_gmbus_teardown(dev_priv);
 +      intel_irq_uninstall(i915);
  cleanup_csr:
 -      intel_csr_ucode_fini(dev_priv);
 -      intel_power_domains_driver_remove(dev_priv);
 -      vga_switcheroo_unregister_client(pdev);
 +      intel_csr_ucode_fini(i915);
 +      intel_power_domains_driver_remove(i915);
 +      i915_switcheroo_unregister(i915);
  cleanup_vga_client:
 -      vga_client_register(pdev, NULL, NULL, NULL);
 +      intel_vga_unregister(i915);
  out:
        return ret;
  }
  
- static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
- {
-       struct apertures_struct *ap;
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       bool primary;
-       int ret;
-
-       ap = alloc_apertures(1);
-       if (!ap)
-               return -ENOMEM;
-
-       ap->ranges[0].base = ggtt->gmadr.start;
-       ap->ranges[0].size = ggtt->mappable_end;
-
-       primary =
-               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-
-       ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
-
-       kfree(ap);
-
-       return ret;
- }
 +static void i915_driver_modeset_remove(struct drm_i915_private *i915)
 +{
 +      intel_modeset_driver_remove(i915);
 +
 +      intel_bios_driver_remove(i915);
 +
 +      i915_switcheroo_unregister(i915);
 +
 +      intel_vga_unregister(i915);
 +
 +      intel_csr_ucode_fini(i915);
 +}
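
i915_driver_modeset_probe()/i915_driver_modeset_remove() above follow the standard probe/unwind shape: each successful init step gains a matching label on the error path, and the remove path mirrors the success path in reverse. A condensed, purely hypothetical sketch of that structure (the demo_* helpers are assumptions, not real functions):

    struct demo_device;
    int  demo_init_vga(struct demo_device *d);      /* helpers assumed elsewhere; */
    void demo_fini_vga(struct demo_device *d);      /* init_* return 0 or -errno  */
    int  demo_init_irq(struct demo_device *d);
    void demo_fini_irq(struct demo_device *d);
    int  demo_init_modeset(struct demo_device *d);
    void demo_fini_modeset(struct demo_device *d);

    static int demo_probe(struct demo_device *d)
    {
            int ret;

            ret = demo_init_vga(d);
            if (ret)
                    return ret;

            ret = demo_init_irq(d);
            if (ret)
                    goto err_vga;

            ret = demo_init_modeset(d);
            if (ret)
                    goto err_irq;

            return 0;

            /* Unwind in the exact reverse order of initialisation. */
    err_irq:
            demo_fini_irq(d);
    err_vga:
            demo_fini_vga(d);
            return ret;
    }

    static void demo_remove(struct demo_device *d)
    {
            /* Mirror of the success path, also in reverse order. */
            demo_fini_modeset(d);
            demo_fini_irq(d);
            demo_fini_vga(d);
    }
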
 +
  static void intel_init_dpio(struct drm_i915_private *dev_priv)
  {
        /*
@@@ -542,7 -576,9 +517,7 @@@ static int i915_driver_early_probe(stru
  
        intel_gt_init_early(&dev_priv->gt, dev_priv);
  
 -      ret = i915_gem_init_early(dev_priv);
 -      if (ret < 0)
 -              goto err_gt;
 +      i915_gem_init_early(dev_priv);
  
        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev_priv);
  
  err_gem:
        i915_gem_cleanup_early(dev_priv);
 -err_gt:
        intel_gt_driver_late_release(&dev_priv->gt);
        vlv_free_s0ix_state(dev_priv);
  err_workqueues:
@@@ -1187,27 -1224,15 +1162,15 @@@ static int i915_driver_hw_probe(struct 
        if (ret)
                goto err_perf;
  
-       /*
-        * WARNING: Apparently we must kick fbdev drivers before vgacon,
-        * otherwise the vga fbdev driver falls over.
-        */
-       ret = i915_kick_out_firmware_fb(dev_priv);
-       if (ret) {
-               DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-               goto err_ggtt;
-       }
-       ret = vga_remove_vgacon(pdev);
-       if (ret) {
-               DRM_ERROR("failed to remove conflicting VGA console\n");
+       ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
+       if (ret)
                goto err_ggtt;
-       }
  
        ret = i915_ggtt_init_hw(dev_priv);
        if (ret)
                goto err_ggtt;
  
 -      intel_gt_init_hw(dev_priv);
 +      intel_gt_init_hw_early(dev_priv);
  
        ret = i915_ggtt_enable_hw(dev_priv);
        if (ret) {
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
                           PM_QOS_DEFAULT_VALUE);
  
 -      /* BIOS often leaves RC6 enabled, but disable it for hw init */
 -      intel_sanitize_gt_powersave(dev_priv);
 -
        intel_gt_init_workarounds(dev_priv);
  
        /* On the 945G/GM, the chipset reports the MSI capability on the
@@@ -1353,13 -1381,14 +1316,13 @@@ static void i915_driver_register(struc
        } else
                DRM_ERROR("Failed to register driver for userspace access!\n");
  
 -      if (HAS_DISPLAY(dev_priv)) {
 +      if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
                /* Must be done after probing outputs */
                intel_opregion_register(dev_priv);
                acpi_video_register();
        }
  
 -      if (IS_GEN(dev_priv, 5))
 -              intel_gpu_ips_init(dev_priv);
 +      intel_gt_driver_register(&dev_priv->gt);
  
        intel_audio_init(dev_priv);
  
         * We need to coordinate the hotplugs with the asynchronous fbdev
         * configuration, for which we use the fbdev->async_cookie.
         */
 -      if (HAS_DISPLAY(dev_priv))
 +      if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
                drm_kms_helper_poll_init(dev);
  
        intel_power_domains_enable(dev_priv);
@@@ -1402,7 -1431,7 +1365,7 @@@ static void i915_driver_unregister(stru
         */
        drm_kms_helper_poll_fini(&dev_priv->drm);
  
 -      intel_gpu_ips_teardown();
 +      intel_gt_driver_unregister(&dev_priv->gt);
        acpi_video_unregister();
        intel_opregion_unregister(dev_priv);
  
@@@ -1531,7 -1560,7 +1494,7 @@@ int i915_driver_probe(struct pci_dev *p
        if (ret < 0)
                goto out_cleanup_mmio;
  
 -      ret = i915_driver_modeset_probe(&dev_priv->drm);
 +      ret = i915_driver_modeset_probe(dev_priv);
        if (ret < 0)
                goto out_cleanup_hw;
  
  out_cleanup_hw:
        i915_driver_hw_remove(dev_priv);
        i915_ggtt_driver_release(dev_priv);
 -
 -      /* Paranoia: make sure we have disabled everything before we exit. */
 -      intel_sanitize_gt_powersave(dev_priv);
  out_cleanup_mmio:
        i915_driver_mmio_release(dev_priv);
  out_runtime_pm_put:
@@@ -1561,6 -1593,8 +1524,6 @@@ out_fini
  
  void i915_driver_remove(struct drm_i915_private *i915)
  {
 -      struct pci_dev *pdev = i915->drm.pdev;
 -
        disable_rpm_wakeref_asserts(&i915->runtime_pm);
  
        i915_driver_unregister(i915);
  
        intel_gvt_driver_remove(i915);
  
 -      intel_modeset_driver_remove(&i915->drm);
 -
 -      intel_bios_driver_remove(i915);
 -
 -      vga_switcheroo_unregister_client(pdev);
 -      vga_client_register(pdev, NULL, NULL, NULL);
 -
 -      intel_csr_ucode_fini(i915);
 +      i915_driver_modeset_remove(i915);
  
        /* Free error state after interrupts are fully disabled. */
        cancel_delayed_work_sync(&i915->gt.hangcheck.work);
@@@ -1607,6 -1648,9 +1570,6 @@@ static void i915_driver_release(struct 
  
        i915_ggtt_driver_release(dev_priv);
  
 -      /* Paranoia: make sure we have disabled everything before we exit. */
 -      intel_sanitize_gt_powersave(dev_priv);
 -
        i915_driver_mmio_release(dev_priv);
  
        enable_rpm_wakeref_asserts(rpm);
@@@ -1650,10 -1694,12 +1613,10 @@@ static void i915_driver_postclose(struc
  {
        struct drm_i915_file_private *file_priv = file->driver_priv;
  
 -      mutex_lock(&dev->struct_mutex);
        i915_gem_context_close(file);
        i915_gem_release(dev, file);
 -      mutex_unlock(&dev->struct_mutex);
  
 -      kfree(file_priv);
 +      kfree_rcu(file_priv, rcu);
  
        /* Catch up with all the deferred frees from "this" client */
        i915_gem_flush_free_objects(to_i915(dev));
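
The postclose hunk above replaces kfree() with kfree_rcu(), presumably so the per-file state is only freed after an RCU grace period now that struct_mutex no longer covers this path. The generic pattern looks like the sketch below (illustrative struct, not drm_i915_file_private):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct file_state {
            struct rcu_head rcu;    /* storage kfree_rcu() needs */
            int id;
    };

    static void file_state_release(struct file_state *fs)
    {
            /*
             * Defer the actual kfree() until after an RCU grace period, so a
             * reader that found 'fs' under rcu_read_lock() can finish safely.
             */
            kfree_rcu(fs, rcu);
    }
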
@@@ -1808,7 -1854,8 +1771,7 @@@ out
        return ret;
  }
  
 -static int
 -i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
 +int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
  {
        int error;
  
@@@ -1832,7 -1879,7 +1795,7 @@@ static int i915_drm_resume(struct drm_d
        int ret;
  
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 -      intel_sanitize_gt_powersave(dev_priv);
 +      intel_gt_pm_disable(&dev_priv->gt);
  
        i915_gem_sanitize(dev_priv);
  
        if (ret)
                DRM_ERROR("failed to re-enable GGTT\n");
  
 +      i915_gem_restore_gtt_mappings(dev_priv);
 +      i915_gem_restore_fences(dev_priv);
 +
        intel_csr_ucode_resume(dev_priv);
  
        i915_restore_state(dev_priv);
  
        i915_gem_resume(dev_priv);
  
 -      intel_modeset_init_hw(dev);
 +      intel_modeset_init_hw(dev_priv);
        intel_init_clock_gating(dev_priv);
  
        spin_lock_irq(&dev_priv->irq_lock);
@@@ -1963,7 -2007,7 +1926,7 @@@ static int i915_drm_resume_early(struc
  
        intel_display_power_resume_early(dev_priv);
  
 -      intel_sanitize_gt_powersave(dev_priv);
 +      intel_gt_pm_disable(&dev_priv->gt);
  
        intel_power_domains_resume(dev_priv);
  
        return ret;
  }
  
 -static int i915_resume_switcheroo(struct drm_i915_private *i915)
 +int i915_resume_switcheroo(struct drm_i915_private *i915)
  {
        int ret;
  
@@@ -2507,6 -2551,9 +2470,6 @@@ static int intel_runtime_suspend(struc
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
        int ret = 0;
  
 -      if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
 -              return -ENODEV;
 -
        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
                return -ENODEV;
  
index 80f3153c48dcd4ba1956a3348c1c7f3c49e6f19a,814f62fca72798bb812bf009f4436ae96889c80b..0ddbd3a5fb8d30eac654f700822d7136c17efaab
  #include "intel_pm.h"
  
  static int
 -insert_mappable_node(struct i915_ggtt *ggtt,
 -                     struct drm_mm_node *node, u32 size)
 +insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
  {
 +      int err;
 +
 +      err = mutex_lock_interruptible(&ggtt->vm.mutex);
 +      if (err)
 +              return err;
 +
        memset(node, 0, sizeof(*node));
 -      return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
 -                                         size, 0, I915_COLOR_UNEVICTABLE,
 -                                         0, ggtt->mappable_end,
 -                                         DRM_MM_INSERT_LOW);
 +      err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
 +                                        size, 0, I915_COLOR_UNEVICTABLE,
 +                                        0, ggtt->mappable_end,
 +                                        DRM_MM_INSERT_LOW);
 +
 +      mutex_unlock(&ggtt->vm.mutex);
 +
 +      return err;
  }
  
  static void
 -remove_mappable_node(struct drm_mm_node *node)
 +remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
  {
 +      mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(node);
 +      mutex_unlock(&ggtt->vm.mutex);
  }
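
insert_mappable_node()/remove_mappable_node() above now serialise against the GGTT's own vm.mutex instead of relying on the outer struct_mutex. As a standalone illustration of guarding a drm_mm range manager with its own lock (the demo_ggtt container below is hypothetical):

    #include <linux/mutex.h>
    #include <linux/string.h>
    #include <drm/drm_mm.h>

    struct demo_ggtt {
            struct mutex lock;      /* protects 'mm' and every node in it */
            struct drm_mm mm;
    };

    static int demo_insert(struct demo_ggtt *ggtt, struct drm_mm_node *node,
                           u64 size, u64 end)
    {
            int err;

            err = mutex_lock_interruptible(&ggtt->lock);
            if (err)
                    return err;

            memset(node, 0, sizeof(*node));
            err = drm_mm_insert_node_in_range(&ggtt->mm, node, size, 0, 0,
                                              0, end, DRM_MM_INSERT_LOW);

            mutex_unlock(&ggtt->lock);
            return err;
    }

    static void demo_remove(struct demo_ggtt *ggtt, struct drm_mm_node *node)
    {
            mutex_lock(&ggtt->lock);
            if (drm_mm_node_allocated(node))  /* skip nodes that never got in */
                    drm_mm_remove_node(node);
            mutex_unlock(&ggtt->lock);
    }
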
  
  int
@@@ -98,8 -87,7 +98,8 @@@ i915_gem_get_aperture_ioctl(struct drm_
        struct i915_vma *vma;
        u64 pinned;
  
 -      mutex_lock(&ggtt->vm.mutex);
 +      if (mutex_lock_interruptible(&ggtt->vm.mutex))
 +              return -EINTR;
  
        pinned = ggtt->vm.reserved;
        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
@@@ -121,24 -109,20 +121,24 @@@ int i915_gem_object_unbind(struct drm_i
        LIST_HEAD(still_in_list);
        int ret = 0;
  
 -      lockdep_assert_held(&obj->base.dev->struct_mutex);
 -
        spin_lock(&obj->vma.lock);
        while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
                                                       struct i915_vma,
                                                       obj_link))) {
 +              struct i915_address_space *vm = vma->vm;
 +
 +              ret = -EBUSY;
 +              if (!i915_vm_tryopen(vm))
 +                      break;
 +
                list_move_tail(&vma->obj_link, &still_in_list);
                spin_unlock(&obj->vma.lock);
  
 -              ret = -EBUSY;
                if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
                    !i915_vma_is_active(vma))
                        ret = i915_vma_unbind(vma);
  
 +              i915_vm_close(vm);
                spin_lock(&obj->vma.lock);
        }
        list_splice(&still_in_list, &obj->vma.list);
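
The unbind loop above now takes a reference on the vma's address space (i915_vm_tryopen()) before dropping obj->vma.lock, so the vm cannot vanish while the spinlock is released. The same drop-the-lock-safely idiom in generic form, with a kref on a hypothetical item type:

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item {
            struct kref ref;
            struct list_head link;
    };

    void item_release(struct kref *ref);        /* assumed elsewhere */
    void item_do_slow_work(struct item *it);    /* may sleep */

    static void walk_items(spinlock_t *lock, struct list_head *items)
    {
            struct item *it;

            spin_lock(lock);
            while ((it = list_first_entry_or_null(items, struct item, link))) {
                    /* Pin the item before letting go of the lock. */
                    if (!kref_get_unless_zero(&it->ref))
                            break;              /* already being torn down */

                    list_del_init(&it->link);
                    spin_unlock(lock);

                    item_do_slow_work(it);      /* safe: we hold a reference */

                    kref_put(&it->ref, item_release);
                    spin_lock(lock);
            }
            spin_unlock(lock);
    }
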
@@@ -354,6 -338,10 +354,6 @@@ i915_gem_gtt_pread(struct drm_i915_gem_
        u64 remain, offset;
        int ret;
  
 -      ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
 -      if (ret)
 -              return ret;
 -
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        vma = ERR_PTR(-ENODEV);
        if (!i915_gem_object_is_tiled(obj))
                                               PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
-               node.allocated = false;
+               node.flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
 -                      goto out_unlock;
 +                      goto out_rpm;
                GEM_BUG_ON(!drm_mm_node_allocated(&node));
        }
  
 -      mutex_unlock(&i915->drm.struct_mutex);
 -
        ret = i915_gem_object_lock_interruptible(obj);
        if (ret)
                goto out_unpin;
  
        i915_gem_object_unlock_fence(obj, fence);
  out_unpin:
 -      mutex_lock(&i915->drm.struct_mutex);
        if (drm_mm_node_allocated(&node)) {
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 -              remove_mappable_node(&node);
 +              remove_mappable_node(ggtt, &node);
        } else {
                i915_vma_unpin(vma);
        }
 -out_unlock:
 +out_rpm:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 -      mutex_unlock(&i915->drm.struct_mutex);
 -
        return ret;
  }
  
@@@ -538,6 -531,10 +538,6 @@@ i915_gem_gtt_pwrite_fast(struct drm_i91
        void __user *user_data;
        int ret;
  
 -      ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
 -      if (ret)
 -              return ret;
 -
        if (i915_gem_object_has_struct_page(obj)) {
                /*
                 * Avoid waking the device up if we can fallback, as
                 * using the cache bypass of indirect GGTT access.
                 */
                wakeref = intel_runtime_pm_get_if_in_use(rpm);
 -              if (!wakeref) {
 -                      ret = -EFAULT;
 -                      goto out_unlock;
 -              }
 +              if (!wakeref)
 +                      return -EFAULT;
        } else {
                /* No backing pages, no fallback, we must force GGTT access */
                wakeref = intel_runtime_pm_get(rpm);
                                               PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
-               node.allocated = false;
+               node.flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                GEM_BUG_ON(!drm_mm_node_allocated(&node));
        }
  
 -      mutex_unlock(&i915->drm.struct_mutex);
 -
        ret = i915_gem_object_lock_interruptible(obj);
        if (ret)
                goto out_unpin;
  
        i915_gem_object_unlock_fence(obj, fence);
  out_unpin:
 -      mutex_lock(&i915->drm.struct_mutex);
        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
        if (drm_mm_node_allocated(&node)) {
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 -              remove_mappable_node(&node);
 +              remove_mappable_node(ggtt, &node);
        } else {
                i915_vma_unpin(vma);
        }
  out_rpm:
        intel_runtime_pm_put(rpm, wakeref);
 -out_unlock:
 -      mutex_unlock(&i915->drm.struct_mutex);
        return ret;
  }
  
@@@ -883,6 -887,74 +883,6 @@@ void i915_gem_runtime_suspend(struct dr
        }
  }
  
 -static long
 -wait_for_timelines(struct drm_i915_private *i915,
 -                 unsigned int wait, long timeout)
 -{
 -      struct intel_gt_timelines *timelines = &i915->gt.timelines;
 -      struct intel_timeline *tl;
 -      unsigned long flags;
 -
 -      spin_lock_irqsave(&timelines->lock, flags);
 -      list_for_each_entry(tl, &timelines->active_list, link) {
 -              struct i915_request *rq;
 -
 -              rq = i915_active_request_get_unlocked(&tl->last_request);
 -              if (!rq)
 -                      continue;
 -
 -              spin_unlock_irqrestore(&timelines->lock, flags);
 -
 -              /*
 -               * "Race-to-idle".
 -               *
 -               * Switching to the kernel context is often used a synchronous
 -               * step prior to idling, e.g. in suspend for flushing all
 -               * current operations to memory before sleeping. These we
 -               * want to complete as quickly as possible to avoid prolonged
 -               * stalls, so allow the gpu to boost to maximum clocks.
 -               */
 -              if (wait & I915_WAIT_FOR_IDLE_BOOST)
 -                      gen6_rps_boost(rq);
 -
 -              timeout = i915_request_wait(rq, wait, timeout);
 -              i915_request_put(rq);
 -              if (timeout < 0)
 -                      return timeout;
 -
 -              /* restart after reacquiring the lock */
 -              spin_lock_irqsave(&timelines->lock, flags);
 -              tl = list_entry(&timelines->active_list, typeof(*tl), link);
 -      }
 -      spin_unlock_irqrestore(&timelines->lock, flags);
 -
 -      return timeout;
 -}
 -
 -int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 -                         unsigned int flags, long timeout)
 -{
 -      /* If the device is asleep, we have no requests outstanding */
 -      if (!intel_gt_pm_is_awake(&i915->gt))
 -              return 0;
 -
 -      GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
 -                flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
 -                timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
 -
 -      timeout = wait_for_timelines(i915, flags, timeout);
 -      if (timeout < 0)
 -              return timeout;
 -
 -      if (flags & I915_WAIT_LOCKED) {
 -              lockdep_assert_held(&i915->drm.struct_mutex);
 -
 -              i915_retire_requests(i915);
 -      }
 -
 -      return 0;
 -}
 -
  struct i915_vma *
  i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_ggtt_view *view,
        struct i915_vma *vma;
        int ret;
  
 -      lockdep_assert_held(&obj->base.dev->struct_mutex);
 +      if (i915_gem_object_never_bind_ggtt(obj))
 +              return ERR_PTR(-ENODEV);
  
        if (flags & PIN_MAPPABLE &&
            (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
                                return ERR_PTR(-ENOSPC);
                }
  
 -              WARN(i915_vma_is_pinned(vma),
 -                   "bo is already pinned in ggtt with incorrect alignment:"
 -                   " offset=%08x, req.alignment=%llx,"
 -                   " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
 -                   i915_ggtt_offset(vma), alignment,
 -                   !!(flags & PIN_MAPPABLE),
 -                   i915_vma_is_map_and_fenceable(vma));
                ret = i915_vma_unbind(vma);
                if (ret)
                        return ERR_PTR(ret);
@@@ -1070,6 -1148,95 +1070,6 @@@ void i915_gem_sanitize(struct drm_i915_
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
  }
  
 -static void init_unused_ring(struct intel_gt *gt, u32 base)
 -{
 -      struct intel_uncore *uncore = gt->uncore;
 -
 -      intel_uncore_write(uncore, RING_CTL(base), 0);
 -      intel_uncore_write(uncore, RING_HEAD(base), 0);
 -      intel_uncore_write(uncore, RING_TAIL(base), 0);
 -      intel_uncore_write(uncore, RING_START(base), 0);
 -}
 -
 -static void init_unused_rings(struct intel_gt *gt)
 -{
 -      struct drm_i915_private *i915 = gt->i915;
 -
 -      if (IS_I830(i915)) {
 -              init_unused_ring(gt, PRB1_BASE);
 -              init_unused_ring(gt, SRB0_BASE);
 -              init_unused_ring(gt, SRB1_BASE);
 -              init_unused_ring(gt, SRB2_BASE);
 -              init_unused_ring(gt, SRB3_BASE);
 -      } else if (IS_GEN(i915, 2)) {
 -              init_unused_ring(gt, SRB0_BASE);
 -              init_unused_ring(gt, SRB1_BASE);
 -      } else if (IS_GEN(i915, 3)) {
 -              init_unused_ring(gt, PRB1_BASE);
 -              init_unused_ring(gt, PRB2_BASE);
 -      }
 -}
 -
 -int i915_gem_init_hw(struct drm_i915_private *i915)
 -{
 -      struct intel_uncore *uncore = &i915->uncore;
 -      struct intel_gt *gt = &i915->gt;
 -      int ret;
 -
 -      BUG_ON(!i915->kernel_context);
 -      ret = intel_gt_terminally_wedged(gt);
 -      if (ret)
 -              return ret;
 -
 -      gt->last_init_time = ktime_get();
 -
 -      /* Double layer security blanket, see i915_gem_init() */
 -      intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 -
 -      if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
 -              intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
 -
 -      if (IS_HASWELL(i915))
 -              intel_uncore_write(uncore,
 -                                 MI_PREDICATE_RESULT_2,
 -                                 IS_HSW_GT3(i915) ?
 -                                 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 -
 -      /* Apply the GT workarounds... */
 -      intel_gt_apply_workarounds(gt);
 -      /* ...and determine whether they are sticking. */
 -      intel_gt_verify_workarounds(gt, "init");
 -
 -      intel_gt_init_swizzling(gt);
 -
 -      /*
 -       * At least 830 can leave some of the unused rings
 -       * "active" (ie. head != tail) after resume which
 -       * will prevent c3 entry. Makes sure all unused rings
 -       * are totally idle.
 -       */
 -      init_unused_rings(gt);
 -
 -      ret = i915_ppgtt_init_hw(gt);
 -      if (ret) {
 -              DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
 -              goto out;
 -      }
 -
 -      /* We can't enable contexts until all firmware is loaded */
 -      ret = intel_uc_init_hw(&gt->uc);
 -      if (ret) {
 -              i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
 -              goto out;
 -      }
 -
 -      intel_mocs_init(gt);
 -
 -out:
 -      intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 -      return ret;
 -}
 -
  static int __intel_engines_record_defaults(struct drm_i915_private *i915)
  {
        struct i915_request *requests[I915_NUM_ENGINES] = {};
@@@ -1208,6 -1375,17 +1208,6 @@@ out
        return err;
  }
  
 -static int
 -i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
 -{
 -      return intel_gt_init_scratch(&i915->gt, size);
 -}
 -
 -static void i915_gem_fini_scratch(struct drm_i915_private *i915)
 -{
 -      intel_gt_fini_scratch(&i915->gt);
 -}
 -
  static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
  {
        struct intel_engine_cs *engine;
@@@ -1249,6 -1427,7 +1249,6 @@@ int i915_gem_init(struct drm_i915_priva
         * we hold the forcewake during initialisation these problems
         * just magically go away.
         */
 -      mutex_lock(&dev_priv->drm.struct_mutex);
        intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
  
        ret = i915_init_ggtt(dev_priv);
                goto err_unlock;
        }
  
 -      ret = i915_gem_init_scratch(dev_priv,
 -                                  IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
 -      if (ret) {
 -              GEM_BUG_ON(ret == -EIO);
 -              goto err_ggtt;
 -      }
 +      intel_gt_init(&dev_priv->gt);
  
        ret = intel_engines_setup(dev_priv);
        if (ret) {
                goto err_unlock;
        }
  
 -      ret = i915_gem_contexts_init(dev_priv);
 +      ret = i915_gem_init_contexts(dev_priv);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_scratch;
  
        intel_uc_init(&dev_priv->gt.uc);
  
 -      ret = i915_gem_init_hw(dev_priv);
 +      ret = intel_gt_init_hw(&dev_priv->gt);
        if (ret)
                goto err_uc_init;
  
                goto err_gt;
  
        intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 -      mutex_unlock(&dev_priv->drm.struct_mutex);
  
        return 0;
  
         * driver doesn't explode during runtime.
         */
  err_gt:
 -      mutex_unlock(&dev_priv->drm.struct_mutex);
 -
 -      intel_gt_set_wedged(&dev_priv->gt);
 +      intel_gt_set_wedged_on_init(&dev_priv->gt);
        i915_gem_suspend(dev_priv);
        i915_gem_suspend_late(dev_priv);
  
        i915_gem_drain_workqueue(dev_priv);
 -
 -      mutex_lock(&dev_priv->drm.struct_mutex);
  err_init_hw:
        intel_uc_fini_hw(&dev_priv->gt.uc);
  err_uc_init:
        if (ret != -EIO) {
                intel_uc_fini(&dev_priv->gt.uc);
 -              intel_cleanup_gt_powersave(dev_priv);
                intel_engines_cleanup(dev_priv);
        }
  err_context:
        if (ret != -EIO)
 -              i915_gem_contexts_fini(dev_priv);
 +              i915_gem_driver_release__contexts(dev_priv);
  err_scratch:
 -      i915_gem_fini_scratch(dev_priv);
 -err_ggtt:
 +      intel_gt_driver_release(&dev_priv->gt);
  err_unlock:
        intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 -      mutex_unlock(&dev_priv->drm.struct_mutex);
  
        if (ret != -EIO) {
                intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
        }
  
        if (ret == -EIO) {
 -              mutex_lock(&dev_priv->drm.struct_mutex);
 -
                /*
                 * Allow engines or uC initialisation to fail by marking the GPU
                 * as wedged. But we only want to do this when the GPU is angry,
                i915_gem_restore_gtt_mappings(dev_priv);
                i915_gem_restore_fences(dev_priv);
                intel_init_clock_gating(dev_priv);
 -
 -              mutex_unlock(&dev_priv->drm.struct_mutex);
        }
  
        i915_gem_drain_freed_objects(dev_priv);
@@@ -1391,35 -1587,43 +1391,35 @@@ void i915_gem_driver_unregister(struct 
  
  void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
  {
 -      GEM_BUG_ON(dev_priv->gt.awake);
 -
        intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
  
        i915_gem_suspend_late(dev_priv);
 -      intel_disable_gt_powersave(dev_priv);
 +      intel_gt_driver_remove(&dev_priv->gt);
  
        /* Flush any outstanding unpin_work. */
        i915_gem_drain_workqueue(dev_priv);
  
 -      mutex_lock(&dev_priv->drm.struct_mutex);
        intel_uc_fini_hw(&dev_priv->gt.uc);
        intel_uc_fini(&dev_priv->gt.uc);
 -      mutex_unlock(&dev_priv->drm.struct_mutex);
  
        i915_gem_drain_freed_objects(dev_priv);
  }
  
  void i915_gem_driver_release(struct drm_i915_private *dev_priv)
  {
 -      mutex_lock(&dev_priv->drm.struct_mutex);
        intel_engines_cleanup(dev_priv);
 -      i915_gem_contexts_fini(dev_priv);
 -      i915_gem_fini_scratch(dev_priv);
 -      mutex_unlock(&dev_priv->drm.struct_mutex);
 +      i915_gem_driver_release__contexts(dev_priv);
 +      intel_gt_driver_release(&dev_priv->gt);
  
        intel_wa_list_free(&dev_priv->gt_wa_list);
  
 -      intel_cleanup_gt_powersave(dev_priv);
 -
        intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
        i915_gem_cleanup_userptr(dev_priv);
        intel_timelines_fini(dev_priv);
  
        i915_gem_drain_freed_objects(dev_priv);
  
 -      WARN_ON(!list_empty(&dev_priv->contexts.list));
 +      WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
  }
  
  void i915_gem_init_mmio(struct drm_i915_private *i915)
@@@ -1439,7 -1643,7 +1439,7 @@@ static void i915_gem_init__mm(struct dr
        i915_gem_init__objects(i915);
  }
  
 -int i915_gem_init_early(struct drm_i915_private *dev_priv)
 +void i915_gem_init_early(struct drm_i915_private *dev_priv)
  {
        int err;
  
        err = i915_gemfs_init(dev_priv);
        if (err)
                DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
 -
 -      return 0;
  }
  
  void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
index 4bdd63b5710029684ea9d9445c737d1dc71dfa9c,41d495a360d876fb5019e5bd590c5182969831f7..225ec808b01a980254afa72cd287286cca10bd44
@@@ -1083,7 -1083,7 +1083,7 @@@ static const struct dss_features omap34
  
  static const struct dss_features omap3630_dss_feats = {
        .model                  =       DSS_MODEL_OMAP3,
 -      .fck_div_max            =       32,
 +      .fck_div_max            =       31,
        .fck_freq_max           =       173000000,
        .dss_fck_multiplier     =       1,
        .parent_clk_name        =       "dpll4_ck",
@@@ -1598,3 -1598,40 +1598,40 @@@ struct platform_driver omap_dsshw_drive
                .suppress_bind_attrs = true,
        },
  };
+
+ /* INIT */
+ static struct platform_driver * const omap_dss_drivers[] = {
+       &omap_dsshw_driver,
+       &omap_dispchw_driver,
+ #ifdef CONFIG_OMAP2_DSS_DSI
+       &omap_dsihw_driver,
+ #endif
+ #ifdef CONFIG_OMAP2_DSS_VENC
+       &omap_venchw_driver,
+ #endif
+ #ifdef CONFIG_OMAP4_DSS_HDMI
+       &omapdss_hdmi4hw_driver,
+ #endif
+ #ifdef CONFIG_OMAP5_DSS_HDMI
+       &omapdss_hdmi5hw_driver,
+ #endif
+ };
+
+ static int __init omap_dss_init(void)
+ {
+       return platform_register_drivers(omap_dss_drivers,
+                                        ARRAY_SIZE(omap_dss_drivers));
+ }
+
+ static void __exit omap_dss_exit(void)
+ {
+       platform_unregister_drivers(omap_dss_drivers,
+                                   ARRAY_SIZE(omap_dss_drivers));
+ }
+
+ module_init(omap_dss_init);
+ module_exit(omap_dss_exit);
+
+ MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+ MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem");
+ MODULE_LICENSE("GPL v2");
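
The dss.c tail added above registers the whole set of sub-drivers from a single module_init() via platform_register_drivers(), which also unregisters the already-registered entries if a later one fails. For comparison, a module carrying only one platform driver can lean on module_platform_driver(), which generates the same init/exit boilerplate; the driver below is purely illustrative:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            return 0;       /* nothing to set up in this sketch */
    }

    static struct platform_driver demo_driver = {
            .probe  = demo_probe,
            .driver = {
                    .name = "demo",
            },
    };
    module_platform_driver(demo_driver);

    MODULE_DESCRIPTION("Single-driver example");
    MODULE_LICENSE("GPL v2");
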