git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
Merge tag 'drm-intel-next-2017-03-06' of git://anongit.freedesktop.org/git/drm-intel...
author    Dave Airlie <airlied@redhat.com>
Wed, 8 Mar 2017 02:41:47 +0000 (12:41 +1000)
committer Dave Airlie <airlied@redhat.com>
Wed, 8 Mar 2017 02:41:47 +0000 (12:41 +1000)
4 weeks' worth of stuff since I was traveling & lazy:

- lspcon improvements (Imre)
- proper atomic state for cdclk handling (Ville)
- gpu reset improvements (Chris)
- lots and lots of polish around fences, requests, waiting and
  everything related all over (both gem and modeset code), from Chris
- atomic by default on gen5+ minus byt/bsw (Maarten did the patch to
  flip the default, really this is a massive joint team effort)
- moar power domains, now 64bit (Ander); a short sketch of the new
  64-bit mask follows this list
- big pile of in-kernel unit tests for various gem subsystems (Chris),
  including simple mock objects for the i915 device and the ggtt
  manager.
- i915_gpu_info in debugfs, for taking a snapshot of the current gpu
  state. Same thing as i915_error_state, but useful if the kernel didn't
  notice something is stuck. From Chris.
- bxt dsi fixes (Umar Shankar)
- bxt w/a updates (Jani)
- no more struct_mutex for gem object unreference (Chris)
- some execlist refactoring (Tvrtko)
- color manager support for glk (Ander)
- improve the power-well sync code to better take over from the
  firmware (Imre)
- gem tracepoint polish (Tvrtko)
- lots of glk fixes all around (Ander)
- ctx switch improvements (Chris)
- glk dsi support & fixes (Deepak M)
- dsi fixes for vlv and cleanups, lots of them (Hans de Goede)
- switch to i915.ko types in lots of our internal modeset code (Ander)
- byt/bsw atomic wm update code, yay (Ville)

* tag 'drm-intel-next-2017-03-06' of git://anongit.freedesktop.org/git/drm-intel: (432 commits)
  drm/i915: Update DRIVER_DATE to 20170306
  drm/i915: Don't use enums for hardware engine id
  drm/i915: Split breadcrumbs spinlock into two
  drm/i915: Refactor wakeup of the next breadcrumb waiter
  drm/i915: Take reference for signaling the request from hardirq
  drm/i915: Add FIFO underrun tracepoints
  drm/i915: Add cxsr toggle tracepoint
  drm/i915: Add VLV/CHV watermark/FIFO programming tracepoints
  drm/i915: Add plane update/disable tracepoints
  drm/i915: Kill level 0 wm hack for VLV/CHV
  drm/i915: Workaround VLV/CHV sprite1->sprite0 enable underrun
  drm/i915: Sanitize VLV/CHV watermarks properly
  drm/i915: Only use update_wm_{pre,post} for pre-ilk platforms
  drm/i915: Nuke crtc->wm.cxsr_allowed
  drm/i915: Compute proper intermediate wms for vlv/cvh
  drm/i915: Skip useless watermark/FIFO related work on VLV/CHV when not needed
  drm/i915: Compute vlv/chv wms the atomic way
  drm/i915: Compute VLV/CHV FIFO sizes based on the PM2 watermarks
  drm/i915: Plop vlv/chv fifo sizes into crtc state
  drm/i915: Plop vlv wm state into crtc_state
  ...

18 files changed:
Documentation/gpu/i915.rst
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_pipe_crc.c

index b0d6709b8600bbc66a01aeae482c24db237d9074,1cd0ee518dee24e114e73d74ab7e73d286e71a08..9c7ed3e3f1e94007d3c1bd9aace282f8d8b96eba
@@@ -144,15 -144,6 +144,15 @@@ High Definition Audio
  .. kernel-doc:: include/drm/i915_component.h
     :internal:
  
 +Intel HDMI LPE Audio Support
 +----------------------------
 +
 +.. kernel-doc:: drivers/gpu/drm/i915/intel_lpe_audio.c
 +   :doc: LPE Audio integration for HDMI or DP playback
 +
 +.. kernel-doc:: drivers/gpu/drm/i915/intel_lpe_audio.c
 +   :internal:
 +
  Panel Self Refresh PSR (PSR/SRD)
  --------------------------------
  
@@@ -222,6 -213,15 +222,15 @@@ Video BIOS Table (VBT)
  .. kernel-doc:: drivers/gpu/drm/i915/intel_vbt_defs.h
     :internal:
  
+ Display clocks
+ --------------
+ .. kernel-doc:: drivers/gpu/drm/i915/intel_cdclk.c
+    :doc: CDCLK / RAWCLK
+ .. kernel-doc:: drivers/gpu/drm/i915/intel_cdclk.c
+    :internal:
  Display PLLs
  ------------
  
index c62ab45683c0c7af0a1e9190978393d5ddb63efa,53e30fcb2751c3a7bfd79b1df4b812ccd0382b06..b1b580337c7a9ab850994d0ab93673149bebc51a
@@@ -29,6 -29,7 +29,7 @@@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs
  # GEM code
  i915-y += i915_cmd_parser.o \
          i915_gem_batch_pool.o \
+         i915_gem_clflush.o \
          i915_gem_context.o \
          i915_gem_dmabuf.o \
          i915_gem_evict.o \
@@@ -72,6 -73,7 +73,7 @@@ i915-y += intel_audio.o 
          intel_atomic.o \
          intel_atomic_plane.o \
          intel_bios.o \
+         intel_cdclk.o \
          intel_color.o \
          intel_display.o \
          intel_dpio_phy.o \
@@@ -116,6 -118,9 +118,9 @@@ i915-y += dvo_ch7017.o 
  
  # Post-mortem debug and GPU hang state capture
  i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
+ i915-$(CONFIG_DRM_I915_SELFTEST) += \
+       selftests/i915_random.o \
+       selftests/i915_selftest.o
  
  # virtual gpu code
  i915-y += i915_vgpu.o
@@@ -129,9 -134,6 +134,9 @@@ i915-y += intel_gvt.
  include $(src)/gvt/Makefile
  endif
  
 +# LPE Audio for VLV and CHT
 +i915-y += intel_lpe_audio.o
 +
  obj-$(CONFIG_DRM_I915) += i915.o
  
  CFLAGS_i915_trace_points.o := -I$(src)
index 7d7244798507de0ac773927cba3e1676baa98696,478f19d2f3d8c67de2f7ae529de1c6b1466c82be..aa2d726b43491eff78511b86d985cb14338669ea
@@@ -35,6 -35,49 +35,23 @@@ static inline struct drm_i915_private *
        return to_i915(node->minor->dev);
  }
  
 -/* As the drm_debugfs_init() routines are called before dev->dev_private is
 - * allocated we need to hook into the minor for release. */
 -static int
 -drm_add_fake_info_node(struct drm_minor *minor,
 -                     struct dentry *ent,
 -                     const void *key)
 -{
 -      struct drm_info_node *node;
 -
 -      node = kmalloc(sizeof(*node), GFP_KERNEL);
 -      if (node == NULL) {
 -              debugfs_remove(ent);
 -              return -ENOMEM;
 -      }
 -
 -      node->minor = minor;
 -      node->dent = ent;
 -      node->info_ent = (void *)key;
 -
 -      mutex_lock(&minor->debugfs_lock);
 -      list_add(&node->list, &minor->debugfs_list);
 -      mutex_unlock(&minor->debugfs_lock);
 -
 -      return 0;
 -}
 -
+ static __always_inline void seq_print_param(struct seq_file *m,
+                                           const char *name,
+                                           const char *type,
+                                           const void *x)
+ {
+       if (!__builtin_strcmp(type, "bool"))
+               seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
+       else if (!__builtin_strcmp(type, "int"))
+               seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
+       else if (!__builtin_strcmp(type, "unsigned int"))
+               seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
+       else if (!__builtin_strcmp(type, "char *"))
+               seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
+       else
+               BUILD_BUG();
+ }
  static int i915_capabilities(struct seq_file *m, void *data)
  {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
        seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
  #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
        DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
  #undef PRINT_FLAG
  
+       kernel_param_lock(THIS_MODULE);
+ #define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
+       I915_PARAMS_FOR_EACH(PRINT_PARAM);
+ #undef PRINT_PARAM
+       kernel_param_unlock(THIS_MODULE);
        return 0;
  }
  
@@@ -428,7 -478,7 +452,7 @@@ static int i915_gem_object_info(struct 
                   dpy_count, dpy_size);
  
        seq_printf(m, "%llu [%llu] gtt total\n",
-                  ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
+                  ggtt->base.total, ggtt->mappable_end);
  
        seq_putc(m, '\n');
        print_batch_pool_stats(m, dev_priv);
                mutex_lock(&dev->struct_mutex);
                request = list_first_entry_or_null(&file_priv->mm.request_list,
                                                   struct drm_i915_gem_request,
-                                                  client_list);
+                                                  client_link);
                rcu_read_lock();
                task = pid_task(request && request->ctx->pid ?
                                request->ctx->pid : file->pid,
@@@ -676,14 -726,14 +700,14 @@@ static void i915_ring_seqno_info(struc
        seq_printf(m, "Current sequence (%s): %x\n",
                   engine->name, intel_engine_get_seqno(engine));
  
-       spin_lock_irq(&b->lock);
+       spin_lock_irq(&b->rb_lock);
        for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                struct intel_wait *w = rb_entry(rb, typeof(*w), node);
  
                seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
                           engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
        }
-       spin_unlock_irq(&b->lock);
+       spin_unlock_irq(&b->rb_lock);
  }
  
  static int i915_gem_seqno_info(struct seq_file *m, void *data)
@@@ -827,10 -877,22 +851,22 @@@ static int i915_interrupt_info(struct s
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
-               for_each_pipe(dev_priv, pipe)
+               for_each_pipe(dev_priv, pipe) {
+                       enum intel_display_power_domain power_domain;
+                       power_domain = POWER_DOMAIN_PIPE(pipe);
+                       if (!intel_display_power_get_if_enabled(dev_priv,
+                                                               power_domain)) {
+                               seq_printf(m, "Pipe %c power disabled\n",
+                                          pipe_name(pipe));
+                               continue;
+                       }
                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
+                       intel_display_power_put(dev_priv, power_domain);
+               }
  
                seq_printf(m, "Master IER:\t%08x\n",
                           I915_READ(VLV_MASTER_IER));
@@@ -928,100 -990,95 +964,95 @@@ static int i915_gem_fence_regs_info(str
  }
  
  #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
- static ssize_t
- i915_error_state_write(struct file *filp,
-                      const char __user *ubuf,
-                      size_t cnt,
-                      loff_t *ppos)
+ static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
+                             size_t count, loff_t *pos)
  {
-       struct i915_error_state_file_priv *error_priv = filp->private_data;
-       DRM_DEBUG_DRIVER("Resetting error state\n");
-       i915_destroy_error_state(error_priv->i915);
+       struct i915_gpu_state *error = file->private_data;
+       struct drm_i915_error_state_buf str;
+       ssize_t ret;
+       loff_t tmp;
  
-       return cnt;
- }
- static int i915_error_state_open(struct inode *inode, struct file *file)
- {
-       struct drm_i915_private *dev_priv = inode->i_private;
-       struct i915_error_state_file_priv *error_priv;
+       if (!error)
+               return 0;
  
-       error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
-       if (!error_priv)
-               return -ENOMEM;
+       ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
+       if (ret)
+               return ret;
  
-       error_priv->i915 = dev_priv;
+       ret = i915_error_state_to_str(&str, error);
+       if (ret)
+               goto out;
  
-       i915_error_state_get(&dev_priv->drm, error_priv);
+       tmp = 0;
+       ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
+       if (ret < 0)
+               goto out;
  
-       file->private_data = error_priv;
+       *pos = str.start + ret;
+ out:
+       i915_error_state_buf_release(&str);
+       return ret;
+ }
  
+ static int gpu_state_release(struct inode *inode, struct file *file)
+ {
+       i915_gpu_state_put(file->private_data);
        return 0;
  }
  
- static int i915_error_state_release(struct inode *inode, struct file *file)
+ static int i915_gpu_info_open(struct inode *inode, struct file *file)
  {
-       struct i915_error_state_file_priv *error_priv = file->private_data;
+       struct i915_gpu_state *gpu;
  
-       i915_error_state_put(error_priv);
-       kfree(error_priv);
+       gpu = i915_capture_gpu_state(inode->i_private);
+       if (!gpu)
+               return -ENOMEM;
  
+       file->private_data = gpu;
        return 0;
  }
  
- static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
-                                    size_t count, loff_t *pos)
+ static const struct file_operations i915_gpu_info_fops = {
+       .owner = THIS_MODULE,
+       .open = i915_gpu_info_open,
+       .read = gpu_state_read,
+       .llseek = default_llseek,
+       .release = gpu_state_release,
+ };
+ static ssize_t
+ i915_error_state_write(struct file *filp,
+                      const char __user *ubuf,
+                      size_t cnt,
+                      loff_t *ppos)
  {
-       struct i915_error_state_file_priv *error_priv = file->private_data;
-       struct drm_i915_error_state_buf error_str;
-       loff_t tmp_pos = 0;
-       ssize_t ret_count = 0;
-       int ret;
+       struct i915_gpu_state *error = filp->private_data;
  
-       ret = i915_error_state_buf_init(&error_str, error_priv->i915,
-                                       count, *pos);
-       if (ret)
-               return ret;
+       if (!error)
+               return 0;
  
-       ret = i915_error_state_to_str(&error_str, error_priv);
-       if (ret)
-               goto out;
+       DRM_DEBUG_DRIVER("Resetting error state\n");
+       i915_reset_error_state(error->i915);
  
-       ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
-                                           error_str.buf,
-                                           error_str.bytes);
+       return cnt;
+ }
  
-       if (ret_count < 0)
-               ret = ret_count;
-       else
-               *pos = error_str.start + ret_count;
- out:
-       i915_error_state_buf_release(&error_str);
-       return ret ?: ret_count;
+ static int i915_error_state_open(struct inode *inode, struct file *file)
+ {
+       file->private_data = i915_first_error_state(inode->i_private);
+       return 0;
  }
  
  static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
-       .read = i915_error_state_read,
+       .read = gpu_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
-       .release = i915_error_state_release,
+       .release = gpu_state_release,
  };
  #endif
  
- static int
- i915_next_seqno_get(void *data, u64 *val)
- {
-       struct drm_i915_private *dev_priv = data;
-       *val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
-       return 0;
- }
  static int
  i915_next_seqno_set(void *data, u64 val)
  {
  }
  
  DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
-                       i915_next_seqno_get, i915_next_seqno_set,
+                       NULL, i915_next_seqno_set,
                        "0x%llx\n");
  
  static int i915_frequency_info(struct seq_file *m, void *unused)
  {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct drm_device *dev = &dev_priv->drm;
        int ret = 0;
  
        intel_runtime_pm_get(dev_priv);
                }
  
                /* RPSTAT1 is in the GT power well */
-               ret = mutex_lock_interruptible(&dev->struct_mutex);
-               if (ret)
-                       goto out;
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  
                reqf = I915_READ(GEN6_RPNSWREQ);
                cagf = intel_gpu_freq(dev_priv, cagf);
  
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-               mutex_unlock(&dev->struct_mutex);
  
                if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                        pm_ier = I915_READ(GEN6_PMIER);
  
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
-               max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
-                            GEN9_FREQ_SCALER : 1);
+               max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
  
                max_freq = (rp_state_cap & 0xff00) >> 8;
-               max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
-                            GEN9_FREQ_SCALER : 1);
+               max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
  
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
-               max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
-                            GEN9_FREQ_SCALER : 1);
+               max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                seq_puts(m, "no P-state info available\n");
        }
  
-       seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
+       seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
  
- out:
        intel_runtime_pm_put(dev_priv);
        return ret;
  }
@@@ -1307,35 -1354,40 +1328,40 @@@ static int i915_hangcheck_info(struct s
  
        intel_runtime_pm_put(dev_priv);
  
-       if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
-               seq_printf(m, "Hangcheck active, fires in %dms\n",
+       if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
+               seq_printf(m, "Hangcheck active, timer fires in %dms\n",
                           jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
                                            jiffies));
-       } else
-               seq_printf(m, "Hangcheck inactive\n");
+       else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
+               seq_puts(m, "Hangcheck active, work pending\n");
+       else
+               seq_puts(m, "Hangcheck inactive\n");
+       seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
  
        for_each_engine(engine, dev_priv, id) {
                struct intel_breadcrumbs *b = &engine->breadcrumbs;
                struct rb_node *rb;
  
                seq_printf(m, "%s:\n", engine->name);
-               seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
+               seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
                           engine->hangcheck.seqno, seqno[id],
-                          intel_engine_last_submit(engine));
+                          intel_engine_last_submit(engine),
+                          engine->timeline->inflight_seqnos);
                seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
                           yesno(intel_engine_has_waiter(engine)),
                           yesno(test_bit(engine->id,
                                          &dev_priv->gpu_error.missed_irq_rings)),
                           yesno(engine->hangcheck.stalled));
  
-               spin_lock_irq(&b->lock);
+               spin_lock_irq(&b->rb_lock);
                for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                        struct intel_wait *w = rb_entry(rb, typeof(*w), node);
  
                        seq_printf(m, "\t%s [%d] waiting for %x\n",
                                   w->tsk->comm, w->tsk->pid, w->seqno);
                }
-               spin_unlock_irq(&b->lock);
+               spin_unlock_irq(&b->rb_lock);
  
                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)engine->hangcheck.acthd,
@@@ -1788,7 -1840,7 +1814,7 @@@ static int i915_ring_freq_table(struct 
        if (ret)
                goto out;
  
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_GEN9_BC(dev_priv)) {
                /* Convert GT frequency to 50 HZ units */
                min_gpu_freq =
                        dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
                                       &ia_freq);
                seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
                           intel_gpu_freq(dev_priv, (gpu_freq *
-                               (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
-                                GEN9_FREQ_SCALER : 1))),
+                                                    (IS_GEN9_BC(dev_priv) ?
+                                                     GEN9_FREQ_SCALER : 1))),
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
        }
@@@ -2302,10 -2354,10 +2328,10 @@@ static int i915_rps_boost_info(struct s
                seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
                           rps_power_to_str(dev_priv->rps.power));
                seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
-                          100 * rpup / rpupei,
+                          rpup && rpupei ? 100 * rpup / rpupei : 0,
                           dev_priv->rps.up_threshold);
                seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
-                          100 * rpdown / rpdownei,
+                          rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
                           dev_priv->rps.down_threshold);
        } else {
                seq_puts(m, "\nRPS Autotuning inactive\n");
@@@ -2351,7 -2403,9 +2377,9 @@@ static int i915_huc_load_status_info(st
        seq_printf(m, "\tRSA: offset is %d; size = %d\n",
                huc_fw->rsa_offset, huc_fw->rsa_size);
  
+       intel_runtime_pm_get(dev_priv);
        seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
+       intel_runtime_pm_put(dev_priv);
  
        return 0;
  }
@@@ -2383,6 -2437,8 +2411,8 @@@ static int i915_guc_load_status_info(st
        seq_printf(m, "\tRSA: offset is %d; size = %d\n",
                guc_fw->rsa_offset, guc_fw->rsa_size);
  
+       intel_runtime_pm_get(dev_priv);
        tmp = I915_READ(GUC_STATUS);
  
        seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
        for (i = 0; i < 16; i++)
                seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
  
+       intel_runtime_pm_put(dev_priv);
        return 0;
  }
  
@@@ -2777,15 -2835,10 +2809,10 @@@ static int i915_power_domain_info(struc
                seq_printf(m, "%-25s %d\n", power_well->name,
                           power_well->count);
  
-               for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
-                    power_domain++) {
-                       if (!(BIT(power_domain) & power_well->domains))
-                               continue;
+               for_each_power_domain(power_domain, power_well->domains)
                        seq_printf(m, "  %-23s %d\n",
                                 intel_display_power_domain_str(power_domain),
                                 power_domains->domain_use_count[power_domain]);
-               }
        }
  
        mutex_unlock(&power_domains->lock);
@@@ -3205,6 -3258,11 +3232,11 @@@ static int i915_engine_info(struct seq_
  
        intel_runtime_pm_get(dev_priv);
  
+       seq_printf(m, "GT awake? %s\n",
+                  yesno(dev_priv->gt.awake));
+       seq_printf(m, "Global active requests: %d\n",
+                  dev_priv->gt.active_requests);
        for_each_engine(engine, dev_priv, id) {
                struct intel_breadcrumbs *b = &engine->breadcrumbs;
                struct drm_i915_gem_request *rq;
                u64 addr;
  
                seq_printf(m, "%s\n", engine->name);
-               seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
+               seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
                           intel_engine_get_seqno(engine),
                           intel_engine_last_submit(engine),
                           engine->hangcheck.seqno,
-                          jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
+                          jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+                          engine->timeline->inflight_seqnos);
  
                rcu_read_lock();
  
  
                        rcu_read_lock();
                        rq = READ_ONCE(engine->execlist_port[0].request);
-                       if (rq)
-                               print_request(m, rq, "\t\tELSP[0] ");
-                       else
+                       if (rq) {
+                               seq_printf(m, "\t\tELSP[0] count=%d, ",
+                                          engine->execlist_port[0].count);
+                               print_request(m, rq, "rq: ");
+                       } else {
                                seq_printf(m, "\t\tELSP[0] idle\n");
+                       }
                        rq = READ_ONCE(engine->execlist_port[1].request);
-                       if (rq)
-                               print_request(m, rq, "\t\tELSP[1] ");
-                       else
+                       if (rq) {
+                               seq_printf(m, "\t\tELSP[1] count=%d, ",
+                                          engine->execlist_port[1].count);
+                               print_request(m, rq, "rq: ");
+                       } else {
                                seq_printf(m, "\t\tELSP[1] idle\n");
+                       }
                        rcu_read_unlock();
  
                        spin_lock_irq(&engine->timeline->lock);
                                   I915_READ(RING_PP_DIR_DCLV(engine)));
                }
  
-               spin_lock_irq(&b->lock);
+               spin_lock_irq(&b->rb_lock);
                for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                        struct intel_wait *w = rb_entry(rb, typeof(*w), node);
  
                        seq_printf(m, "\t%s [%d] waiting for %x\n",
                                   w->tsk->comm, w->tsk->pid, w->seqno);
                }
-               spin_unlock_irq(&b->lock);
+               spin_unlock_irq(&b->rb_lock);
  
                seq_puts(m, "\n");
        }
@@@ -3746,7 -3811,19 +3785,19 @@@ static int i915_displayport_test_data_s
                if (connector->status == connector_status_connected &&
                    connector->encoder != NULL) {
                        intel_dp = enc_to_intel_dp(connector->encoder);
-                       seq_printf(m, "%lx", intel_dp->compliance.test_data.edid);
+                       if (intel_dp->compliance.test_type ==
+                           DP_TEST_LINK_EDID_READ)
+                               seq_printf(m, "%lx",
+                                          intel_dp->compliance.test_data.edid);
+                       else if (intel_dp->compliance.test_type ==
+                                DP_TEST_LINK_VIDEO_PATTERN) {
+                               seq_printf(m, "hdisplay: %d\n",
+                                          intel_dp->compliance.test_data.hdisplay);
+                               seq_printf(m, "vdisplay: %d\n",
+                                          intel_dp->compliance.test_data.vdisplay);
+                               seq_printf(m, "bpc: %u\n",
+                                          intel_dp->compliance.test_data.bpc);
+                       }
                } else
                        seq_puts(m, "0");
        }
@@@ -4237,7 -4314,8 +4288,8 @@@ i915_max_freq_set(void *data, u64 val
  
        dev_priv->rps.max_freq_softlimit = val;
  
-       intel_set_rps(dev_priv, val);
+       if (intel_set_rps(dev_priv, val))
+               DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
  
        mutex_unlock(&dev_priv->rps.hw_lock);
  
@@@ -4292,7 -4370,8 +4344,8 @@@ i915_min_freq_set(void *data, u64 val
  
        dev_priv->rps.min_freq_softlimit = val;
  
-       intel_set_rps(dev_priv, val);
+       if (intel_set_rps(dev_priv, val))
+               DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
  
        mutex_unlock(&dev_priv->rps.hw_lock);
  
@@@ -4418,7 -4497,7 +4471,7 @@@ static void gen9_sseu_device_status(str
  
                sseu->slice_mask |= BIT(s);
  
-               if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+               if (IS_GEN9_BC(dev_priv))
                        sseu->subslice_mask =
                                INTEL_INFO(dev_priv)->sseu.subslice_mask;
  
@@@ -4567,6 -4646,112 +4620,81 @@@ static const struct file_operations i91
        .release = i915_forcewake_release,
  };
  
 -static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
 -{
 -      struct dentry *ent;
 -
 -      ent = debugfs_create_file("i915_forcewake_user",
 -                                S_IRUSR,
 -                                root, to_i915(minor->dev),
 -                                &i915_forcewake_fops);
 -      if (!ent)
 -              return -ENOMEM;
 -
 -      return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
 -}
 -
+ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
+ {
+       struct drm_i915_private *dev_priv = m->private;
+       struct i915_hotplug *hotplug = &dev_priv->hotplug;
+       seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
+       seq_printf(m, "Detected: %s\n",
+                  yesno(delayed_work_pending(&hotplug->reenable_work)));
+       return 0;
+ }
+ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
+                                       const char __user *ubuf, size_t len,
+                                       loff_t *offp)
+ {
+       struct seq_file *m = file->private_data;
+       struct drm_i915_private *dev_priv = m->private;
+       struct i915_hotplug *hotplug = &dev_priv->hotplug;
+       unsigned int new_threshold;
+       int i;
+       char *newline;
+       char tmp[16];
+       if (len >= sizeof(tmp))
+               return -EINVAL;
+       if (copy_from_user(tmp, ubuf, len))
+               return -EFAULT;
+       tmp[len] = '\0';
+       /* Strip newline, if any */
+       newline = strchr(tmp, '\n');
+       if (newline)
+               *newline = '\0';
+       if (strcmp(tmp, "reset") == 0)
+               new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+       else if (kstrtouint(tmp, 10, &new_threshold) != 0)
+               return -EINVAL;
+       if (new_threshold > 0)
+               DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
+                             new_threshold);
+       else
+               DRM_DEBUG_KMS("Disabling HPD storm detection\n");
+       spin_lock_irq(&dev_priv->irq_lock);
+       hotplug->hpd_storm_threshold = new_threshold;
+       /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+       for_each_hpd_pin(i)
+               hotplug->stats[i].count = 0;
+       spin_unlock_irq(&dev_priv->irq_lock);
+       /* Re-enable hpd immediately if we were in an irq storm */
+       flush_delayed_work(&dev_priv->hotplug.reenable_work);
+       return len;
+ }
+ static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
+ {
+       return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
+ }
+ static const struct file_operations i915_hpd_storm_ctl_fops = {
+       .owner = THIS_MODULE,
+       .open = i915_hpd_storm_ctl_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = i915_hpd_storm_ctl_write
+ };
 -static int i915_debugfs_create(struct dentry *root,
 -                             struct drm_minor *minor,
 -                             const char *name,
 -                             const struct file_operations *fops)
 -{
 -      struct dentry *ent;
 -
 -      ent = debugfs_create_file(name,
 -                                S_IRUGO | S_IWUSR,
 -                                root, to_i915(minor->dev),
 -                                fops);
 -      if (!ent)
 -              return -ENOMEM;
 -
 -      return drm_add_fake_info_node(minor, ent, fops);
 -}
 -
  static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_capabilities", i915_capabilities, 0},
        {"i915_gem_objects", i915_gem_object_info, 0},
@@@ -4633,6 -4818,7 +4761,7 @@@ static const struct i915_debugfs_files 
        {"i915_gem_drop_caches", &i915_drop_caches_fops},
  #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
        {"i915_error_state", &i915_error_state_fops},
+       {"i915_gpu_info", &i915_gpu_info_fops},
  #endif
        {"i915_next_seqno", &i915_next_seqno_fops},
        {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
        {"i915_dp_test_data", &i915_displayport_test_data_fops},
        {"i915_dp_test_type", &i915_displayport_test_type_fops},
        {"i915_dp_test_active", &i915_displayport_test_active_fops},
-       {"i915_guc_log_control", &i915_guc_log_control_fops}
+       {"i915_guc_log_control", &i915_guc_log_control_fops},
+       {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}
  };
  
  int i915_debugfs_register(struct drm_i915_private *dev_priv)
  {
        struct drm_minor *minor = dev_priv->drm.primary;
 +      struct dentry *ent;
        int ret, i;
  
 -      ret = i915_forcewake_create(minor->debugfs_root, minor);
 -      if (ret)
 -              return ret;
 +      ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
 +                                minor->debugfs_root, to_i915(minor->dev),
 +                                &i915_forcewake_fops);
 +      if (!ent)
 +              return -ENOMEM;
  
        ret = intel_pipe_crc_create(minor);
        if (ret)
                return ret;
  
        for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 -              ret = i915_debugfs_create(minor->debugfs_root, minor,
 -                                        i915_debugfs_files[i].name,
 +              ent = debugfs_create_file(i915_debugfs_files[i].name,
 +                                        S_IRUGO | S_IWUSR,
 +                                        minor->debugfs_root,
 +                                        to_i915(minor->dev),
                                          i915_debugfs_files[i].fops);
 -              if (ret)
 -                      return ret;
 +              if (!ent)
 +                      return -ENOMEM;
        }
  
        return drm_debugfs_create_files(i915_debugfs_list,
                                        minor->debugfs_root, minor);
  }
  
 -void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
 -{
 -      struct drm_minor *minor = dev_priv->drm.primary;
 -      int i;
 -
 -      drm_debugfs_remove_files(i915_debugfs_list,
 -                               I915_DEBUGFS_ENTRIES, minor);
 -
 -      drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
 -                               1, minor);
 -
 -      intel_pipe_crc_cleanup(minor);
 -
 -      for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 -              struct drm_info_list *info_list =
 -                      (struct drm_info_list *)i915_debugfs_files[i].fops;
 -
 -              drm_debugfs_remove_files(info_list, 1, minor);
 -      }
 -}
 -
  struct dpcd_block {
        /* DPCD dump start address. */
        unsigned int offset;
index 655d146e1126560a865d52dd29514e4037c036e6,9be9b9b7f9cbb5f36bc56c43c241d4cbc70e4e23..704dbcf63866f0bdde816aa460eb7936e94d0a15
@@@ -43,6 -43,7 +43,7 @@@
  
  #include <drm/drmP.h>
  #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_atomic_helper.h>
  #include <drm/i915_drm.h>
  
  #include "i915_drv.h"
@@@ -248,6 -249,7 +249,7 @@@ static int i915_getparam(struct drm_dev
        case I915_PARAM_IRQ_ACTIVE:
        case I915_PARAM_ALLOW_BATCHBUFFER:
        case I915_PARAM_LAST_DISPATCH:
+       case I915_PARAM_HAS_EXEC_CONSTANTS:
                /* Reject all old ums/dri params. */
                return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
        case I915_PARAM_HAS_BSD2:
                value = !!dev_priv->engine[VCS2];
                break;
-       case I915_PARAM_HAS_EXEC_CONSTANTS:
-               value = INTEL_GEN(dev_priv) >= 4;
-               break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev_priv);
                break;
                value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
                break;
        case I915_PARAM_HUC_STATUS:
-               /* The register is already force-woken. We dont need
-                * any rpm here
-                */
+               intel_runtime_pm_get(dev_priv);
                value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+               intel_runtime_pm_put(dev_priv);
                break;
        case I915_PARAM_MMAP_GTT_VERSION:
                /* Though we've started our numbering from 1, and so class all
        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
        case I915_PARAM_HAS_COHERENT_PHYS_GTT:
        case I915_PARAM_HAS_EXEC_SOFTPIN:
+       case I915_PARAM_HAS_EXEC_ASYNC:
+       case I915_PARAM_HAS_EXEC_FENCE:
                /* For the time being all of these are always true;
                 * if some supported hardware does not have one of these
                 * features this value needs to be provided from
@@@ -756,6 -756,15 +756,15 @@@ out_err
        return -ENOMEM;
  }
  
+ static void i915_engines_cleanup(struct drm_i915_private *i915)
+ {
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       for_each_engine(engine, i915, id)
+               kfree(engine);
+ }
  static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
  {
        destroy_workqueue(dev_priv->hotplug.dp_wq);
   */
  static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
  {
-       if (IS_HSW_EARLY_SDV(dev_priv) ||
-           IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
+       bool pre = false;
+       pre |= IS_HSW_EARLY_SDV(dev_priv);
+       pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
+       pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+       if (pre) {
                DRM_ERROR("This is a pre-production stepping. "
                          "It may not be fully functional.\n");
+               add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+       }
  }
  
  /**
@@@ -808,6 -824,7 +824,7 @@@ static int i915_driver_init_early(struc
        spin_lock_init(&dev_priv->gpu_error.lock);
        mutex_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        spin_lock_init(&dev_priv->mmio_flip_lock);
        spin_lock_init(&dev_priv->wm.dsparb_lock);
        mutex_init(&dev_priv->pps_mutex);
  
        intel_uc_init_early(dev_priv);
        i915_memcpy_init_early(dev_priv);
  
+       ret = intel_engines_init_early(dev_priv);
+       if (ret)
+               return ret;
        ret = i915_workqueues_init(dev_priv);
        if (ret < 0)
-               return ret;
+               goto err_engines;
  
        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev_priv);
  
  err_workqueues:
        i915_workqueues_cleanup(dev_priv);
+ err_engines:
+       i915_engines_cleanup(dev_priv);
        return ret;
  }
  
@@@ -864,6 -886,7 +886,7 @@@ static void i915_driver_cleanup_early(s
        i915_perf_fini(dev_priv);
        i915_gem_load_cleanup(dev_priv);
        i915_workqueues_cleanup(dev_priv);
+       i915_engines_cleanup(dev_priv);
  }
  
  static int i915_mmio_setup(struct drm_i915_private *dev_priv)
@@@ -930,6 -953,7 +953,7 @@@ static int i915_driver_init_mmio(struc
                goto put_bridge;
  
        intel_uncore_init(dev_priv);
+       i915_gem_init_mmio(dev_priv);
  
        return 0;
  
@@@ -967,7 -991,7 +991,7 @@@ static void intel_sanitize_options(stru
        DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
  
        i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
-       DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores));
+       DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
  }
  
  /**
@@@ -1139,7 -1163,7 +1163,7 @@@ static void i915_driver_register(struc
        if (IS_GEN5(dev_priv))
                intel_gpu_ips_init(dev_priv);
  
 -      i915_audio_component_init(dev_priv);
 +      intel_audio_init(dev_priv);
  
        /*
         * Some ports require correctly set-up hpd registers for detection to
   */
  static void i915_driver_unregister(struct drm_i915_private *dev_priv)
  {
 -      i915_audio_component_cleanup(dev_priv);
 +      intel_audio_deinit(dev_priv);
  
        intel_gpu_ips_teardown();
        acpi_video_unregister();
  
        i915_teardown_sysfs(dev_priv);
        i915_guc_log_unregister(dev_priv);
 -      i915_debugfs_unregister(dev_priv);
        drm_dev_unregister(&dev_priv->drm);
  
        i915_gem_shrinker_cleanup(dev_priv);
   */
  int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
+       const struct intel_device_info *match_info =
+               (struct intel_device_info *)ent->driver_data;
        struct drm_i915_private *dev_priv;
        int ret;
  
-       if (i915.nuclear_pageflip)
-               driver.driver_features |= DRIVER_ATOMIC;
+       /* Enable nuclear pageflip on ILK+, except vlv/chv */
+       if (!i915.nuclear_pageflip &&
+           (match_info->gen < 5 || match_info->has_gmch_display))
+               driver.driver_features &= ~DRIVER_ATOMIC;
  
        ret = -ENOMEM;
        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
                ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
        if (ret) {
                DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
-               kfree(dev_priv);
-               return ret;
+               goto out_free;
        }
  
        dev_priv->drm.pdev = pdev;
  
        ret = pci_enable_device(pdev);
        if (ret)
-               goto out_free_priv;
+               goto out_fini;
  
        pci_set_drvdata(pdev, &dev_priv->drm);
  
@@@ -1270,9 -1298,11 +1297,11 @@@ out_runtime_pm_put
        i915_driver_cleanup_early(dev_priv);
  out_pci_disable:
        pci_disable_device(pdev);
- out_free_priv:
+ out_fini:
        i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
-       drm_dev_unref(&dev_priv->drm);
+       drm_dev_fini(&dev_priv->drm);
+ out_free:
+       kfree(dev_priv);
        return ret;
  }
  
@@@ -1280,6 -1310,8 +1309,8 @@@ void i915_driver_unload(struct drm_devi
  {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct drm_modeset_acquire_ctx ctx;
+       int ret;
  
        intel_fbdev_fini(dev);
  
  
        intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
  
+       drm_modeset_acquire_init(&ctx, 0);
+       while (1) {
+               ret = drm_modeset_lock_all_ctx(dev, &ctx);
+               if (!ret)
+                       ret = drm_atomic_helper_disable_all(dev, &ctx);
+               if (ret != -EDEADLK)
+                       break;
+               drm_modeset_backoff(&ctx);
+       }
+       if (ret)
+               DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
        intel_gvt_cleanup(dev_priv);
  
        i915_driver_unregister(dev_priv);
  
        /* Free error state after interrupts are fully disabled. */
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-       i915_destroy_error_state(dev_priv);
+       i915_reset_error_state(dev_priv);
  
        /* Flush any outstanding unpin_work. */
        drain_workqueue(dev_priv->wq);
        i915_driver_cleanup_mmio(dev_priv);
  
        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ }
+ static void i915_driver_release(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = to_i915(dev);
  
        i915_driver_cleanup_early(dev_priv);
+       drm_dev_fini(&dev_priv->drm);
+       kfree(dev_priv);
  }
  
  static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@@ -1716,6 -1774,8 +1773,8 @@@ static int i915_drm_resume_early(struc
            !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
                intel_power_domains_init_hw(dev_priv, true);
  
+       i915_gem_sanitize(dev_priv);
        enable_rpm_wakeref_asserts(dev_priv);
  
  out:
@@@ -1787,7 -1847,7 +1846,7 @@@ void i915_reset(struct drm_i915_privat
                goto error;
        }
  
-       i915_gem_reset_finish(dev_priv);
+       i915_gem_reset(dev_priv);
        intel_overlay_reset(dev_priv);
  
        /* Ok, now get things going again... */
        i915_queue_hangcheck(dev_priv);
  
  wakeup:
+       i915_gem_reset_finish(dev_priv);
        enable_irq(dev_priv->drm.irq);
        wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
        return;
@@@ -2532,7 -2593,7 +2592,7 @@@ static const struct drm_ioctl_desc i915
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@@ -2574,7 -2635,8 +2634,8 @@@ static struct drm_driver driver = 
         */
        .driver_features =
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
-           DRIVER_RENDER | DRIVER_MODESET,
+           DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
+       .release = i915_driver_release,
        .open = i915_driver_open,
        .lastclose = i915_driver_lastclose,
        .preclose = i915_driver_preclose,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
  };
+ #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ #include "selftests/mock_drm.c"
+ #endif
index 8df73751c367a1c9177bb68abc117e41086de36b,56e569f536ec9dc0d206824e532cf273d22fc5c8..9ae1e520f48c36ed3eeb8455efaf732e4a1c8fad
@@@ -79,8 -79,8 +79,8 @@@
  
  #define DRIVER_NAME           "i915"
  #define DRIVER_DESC           "Intel Graphics"
- #define DRIVER_DATE           "20170123"
- #define DRIVER_TIMESTAMP      1485156432
+ #define DRIVER_DATE           "20170306"
+ #define DRIVER_TIMESTAMP      1488785683
  
  #undef WARN_ON
  /* Many gcc seem to no see through this and fall over :( */
@@@ -293,6 -293,7 +293,7 @@@ enum plane_id 
        PLANE_PRIMARY,
        PLANE_SPRITE0,
        PLANE_SPRITE1,
+       PLANE_SPRITE2,
        PLANE_CURSOR,
        I915_MAX_PLANES,
  };
@@@ -343,6 -344,11 +344,11 @@@ enum intel_display_power_domain 
        POWER_DOMAIN_PORT_DDI_C_LANES,
        POWER_DOMAIN_PORT_DDI_D_LANES,
        POWER_DOMAIN_PORT_DDI_E_LANES,
+       POWER_DOMAIN_PORT_DDI_A_IO,
+       POWER_DOMAIN_PORT_DDI_B_IO,
+       POWER_DOMAIN_PORT_DDI_C_IO,
+       POWER_DOMAIN_PORT_DDI_D_IO,
+       POWER_DOMAIN_PORT_DDI_E_IO,
        POWER_DOMAIN_PORT_DSI,
        POWER_DOMAIN_PORT_CRT,
        POWER_DOMAIN_PORT_OTHER,
@@@ -384,6 -390,8 +390,8 @@@ enum hpd_pin 
  #define for_each_hpd_pin(__pin) \
        for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
  
+ #define HPD_STORM_DEFAULT_THRESHOLD 5
  struct i915_hotplug {
        struct work_struct hotplug_work;
  
        struct work_struct poll_init_work;
        bool poll_enabled;
  
+       unsigned int hpd_storm_threshold;
        /*
         * if we get a HPD irq from DP and a HPD irq from non-DP
         * the non-DP HPD could block the workqueue on a mode config
  
  #define for_each_power_domain(domain, mask)                           \
        for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
-               for_each_if ((1 << (domain)) & (mask))
+               for_each_if (BIT_ULL(domain) & (mask))
+ #define for_each_power_well(__dev_priv, __power_well)                         \
+       for ((__power_well) = (__dev_priv)->power_domains.power_wells;  \
+            (__power_well) - (__dev_priv)->power_domains.power_wells < \
+               (__dev_priv)->power_domains.power_well_count;           \
+            (__power_well)++)
+ #define for_each_power_well_rev(__dev_priv, __power_well)                     \
+       for ((__power_well) = (__dev_priv)->power_domains.power_wells +         \
+                             (__dev_priv)->power_domains.power_well_count - 1; \
+            (__power_well) - (__dev_priv)->power_domains.power_wells >= 0;     \
+            (__power_well)--)
+ #define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask)   \
+       for_each_power_well(__dev_priv, __power_well)                           \
+               for_each_if ((__power_well)->domains & (__domain_mask))
+ #define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
+       for_each_power_well_rev(__dev_priv, __power_well)                       \
+               for_each_if ((__power_well)->domains & (__domain_mask))
+ #define for_each_intel_plane_in_state(__state, plane, plane_state, __i) \
+       for ((__i) = 0; \
+            (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+                    ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+                     (plane_state) = to_intel_plane_state((__state)->base.planes[__i].state), 1); \
+            (__i)++) \
+               for_each_if (plane_state)
  
  struct drm_i915_private;
  struct i915_mm_struct;
@@@ -600,9 -638,13 +638,13 @@@ struct intel_initial_plane_config
  struct intel_crtc;
  struct intel_limit;
  struct dpll;
+ struct intel_cdclk_state;
  
  struct drm_i915_display_funcs {
-       int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
+       void (*get_cdclk)(struct drm_i915_private *dev_priv,
+                         struct intel_cdclk_state *cdclk_state);
+       void (*set_cdclk)(struct drm_i915_private *dev_priv,
+                         const struct intel_cdclk_state *cdclk_state);
        int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
        int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
        int (*compute_intermediate_wm)(struct drm_device *dev,
        int (*compute_global_watermarks)(struct drm_atomic_state *state);
        void (*update_wm)(struct intel_crtc *crtc);
        int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
-       void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                   struct intel_encoder *encoder,
                                   const struct drm_display_mode *adjusted_mode);
        void (*audio_codec_disable)(struct intel_encoder *encoder);
-       void (*fdi_link_train)(struct drm_crtc *crtc);
+       void (*fdi_link_train)(struct intel_crtc *crtc,
+                              const struct intel_crtc_state *crtc_state);
        void (*init_clock_gating)(struct drm_i915_private *dev_priv);
        int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
                          struct drm_framebuffer *fb,
@@@ -856,6 -898,7 +898,7 @@@ enum intel_platform 
        INTEL_BROXTON,
        INTEL_KABYLAKE,
        INTEL_GEMINILAKE,
+       INTEL_MAX_PLATFORMS
  };
  
  struct intel_device_info {
  
  struct intel_display_error_state;
  
- struct drm_i915_error_state {
+ struct i915_gpu_state {
        struct kref ref;
        struct timeval time;
        struct timeval boottime;
  
        char error_msg[128];
        bool simulated;
+       bool awake;
+       bool wakelock;
+       bool suspended;
        int iommu;
        u32 reset_count;
        u32 suspend_count;
        struct intel_device_info device_info;
+       struct i915_params params;
  
        /* Generic register state */
        u32 eir;
        u32 pgtbl_er;
        u32 ier;
-       u32 gtier[4];
+       u32 gtier[4], ngtier;
        u32 ccid;
        u32 derrmr;
        u32 forcewake;
        u32 gab_ctl;
        u32 gfx_mode;
  
+       u32 nfence;
        u64 fence[I915_MAX_NUM_FENCES];
        struct intel_overlay_error_state *overlay;
        struct intel_display_error_state *display;
                u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
                struct intel_instdone instdone;
  
+               struct drm_i915_error_context {
+                       char comm[TASK_COMM_LEN];
+                       pid_t pid;
+                       u32 handle;
+                       u32 hw_id;
+                       int ban_score;
+                       int active;
+                       int guilty;
+               } context;
                struct drm_i915_error_object {
                        u64 gtt_offset;
                        u64 gtt_size;
                                u32 pp_dir_base;
                        };
                } vm_info;
-               pid_t pid;
-               char comm[TASK_COMM_LEN];
-               int context_bans;
        } engine[I915_NUM_ENGINES];
  
        struct drm_i915_error_buffer {
@@@ -1395,7 -1449,7 +1449,7 @@@ struct i915_power_well 
        int count;
        /* cached hw enabled state */
        bool hw_enabled;
-       unsigned long domains;
+       u64 domains;
        /* unique identifier for this power well */
        unsigned long id;
        /*
@@@ -1456,7 -1510,7 +1510,7 @@@ struct i915_gem_mm 
        struct work_struct free_work;
  
        /** Usable portion of the GTT for GEM */
-       phys_addr_t stolen_base; /* limited to low memory (32-bit) */
+       dma_addr_t stolen_base; /* limited to low memory (32-bit) */
  
        /** PPGTT used for aliasing the PPGTT with the GTT */
        struct i915_hw_ppgtt *aliasing_ppgtt;
@@@ -1498,11 -1552,6 +1552,6 @@@ struct drm_i915_error_state_buf 
        loff_t pos;
  };
  
- struct i915_error_state_file_priv {
-       struct drm_i915_private *i915;
-       struct drm_i915_error_state *error;
- };
  #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
  #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
  
@@@ -1519,7 -1568,7 +1568,7 @@@ struct i915_gpu_error 
        /* For reset and error_state handling. */
        spinlock_t lock;
        /* Protected by the above dev->gpu_error.lock. */
-       struct drm_i915_error_state *first_error;
+       struct i915_gpu_state *first_error;
  
        unsigned long missed_irq_rings;
  
@@@ -2053,6 -2102,10 +2102,10 @@@ struct i915_oa_ops 
        bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv);
  };
  
+ struct intel_cdclk_state {
+       unsigned int cdclk, vco, ref;
+ };
  struct drm_i915_private {
        struct drm_device drm;
  
  
        const struct intel_device_info info;
  
-       int relative_constants_mode;
        void __iomem *regs;
  
        struct intel_uncore uncore;
  
        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int skl_preferred_vco_freq;
-       unsigned int cdclk_freq, max_cdclk_freq;
-       /*
-        * For reading holding any crtc lock is sufficient,
-        * for writing must hold all of them.
-        */
-       unsigned int atomic_cdclk_freq;
+       unsigned int max_cdclk_freq;
  
        unsigned int max_dotclk_freq;
        unsigned int rawclk_freq;
        unsigned int czclk_freq;
  
        struct {
-               unsigned int vco, ref;
-       } cdclk_pll;
+               /*
+                * The current logical cdclk state.
+                * See intel_atomic_state.cdclk.logical
+                *
+                * For reading holding any crtc lock is sufficient,
+                * for writing must hold all of them.
+                */
+               struct intel_cdclk_state logical;
+               /*
+                * The current actual cdclk state.
+                * See intel_atomic_state.cdclk.actual
+                */
+               struct intel_cdclk_state actual;
+               /* The current hardware cdclk state */
+               struct intel_cdclk_state hw;
+       } cdclk;
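
cdclk is now carried as three full intel_cdclk_state copies rather than loose frequencies: the logical state computed for an atomic transaction, the actual state that will be programmed, and the hw state last read back. Consumers therefore compare whole states. A hedged sketch of such a comparison; needs_cdclk_update() is a hypothetical helper, not one added by this diff:

    #include <stdbool.h>

    struct cdclk_state_example {            /* mirrors intel_cdclk_state */
            unsigned int cdclk, vco, ref;
    };

    /* Two states match only if all three clocks match. */
    static bool cdclk_state_equal(const struct cdclk_state_example *a,
                                  const struct cdclk_state_example *b)
    {
            return a->cdclk == b->cdclk && a->vco == b->vco && a->ref == b->ref;
    }

    /* Reprogramming is needed when the computed state differs from hw. */
    static bool needs_cdclk_update(const struct cdclk_state_example *actual,
                                   const struct cdclk_state_example *hw)
    {
            return !cdclk_state_equal(actual, hw);
    }

    int main(void)
    {
            struct cdclk_state_example actual = { 540000, 8100000, 19200 };
            struct cdclk_state_example hw     = { 337500, 8100000, 19200 };

            return needs_cdclk_update(&actual, &hw) ? 0 : 1;
    }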
  
        /**
         * wq - Driver workqueue for GEM.
        /* Used to save the pipe-to-encoder mapping for audio */
        struct intel_encoder *av_enc_map[I915_MAX_PIPES];
  
 +      /* necessary resource sharing with HDMI LPE audio driver. */
 +      struct {
 +              struct platform_device *platdev;
 +              int     irq;
 +      } lpe_audio;
 +
        /*
         * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
         * will be rejected. Instead look for a better place.
@@@ -2752,6 -2805,12 +2811,12 @@@ intel_info(const struct drm_i915_privat
  #define IS_KBL_REVID(dev_priv, since, until) \
        (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
  
+ #define GLK_REVID_A0          0x0
+ #define GLK_REVID_A1          0x1
+ #define IS_GLK_REVID(dev_priv, since, until) \
+       (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
  /*
   * The genX designation typically refers to the render engine, so render
   * capability related checks should use IS_GEN, while display and other checks
  #define IS_GEN8(dev_priv)     (!!((dev_priv)->info.gen_mask & BIT(7)))
  #define IS_GEN9(dev_priv)     (!!((dev_priv)->info.gen_mask & BIT(8)))
  
- #define IS_GEN9_LP(dev_priv)  (IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp)
  #define IS_LP(dev_priv)       (INTEL_INFO(dev_priv)->is_lp)
+ #define IS_GEN9_LP(dev_priv)  (IS_GEN9(dev_priv) && IS_LP(dev_priv))
+ #define IS_GEN9_BC(dev_priv)  (IS_GEN9(dev_priv) && !IS_LP(dev_priv))
  
  #define ENGINE_MASK(id)       BIT(id)
  #define RENDER_RING   ENGINE_MASK(RCS)
  
  /* WaRsDisableCoarsePowerGating:skl,bxt */
  #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
-       (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
-        IS_SKL_GT3(dev_priv) || \
-        IS_SKL_GT4(dev_priv))
+       (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
  
  /*
   * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@@ -2952,6 -3010,9 +3016,9 @@@ extern unsigned long i915_gfx_val(struc
  extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
  int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
  
+ int intel_engines_init_early(struct drm_i915_private *dev_priv);
+ int intel_engines_init(struct drm_i915_private *dev_priv);
  /* intel_hotplug.c */
  void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask);
@@@ -3129,6 -3190,7 +3196,7 @@@ int i915_gem_get_aperture_ioctl(struct 
                                struct drm_file *file_priv);
  int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
+ void i915_gem_sanitize(struct drm_i915_private *i915);
  int i915_gem_load_init(struct drm_i915_private *dev_priv);
  void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
  void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
@@@ -3341,18 -3403,20 +3409,20 @@@ static inline u32 i915_reset_count(stru
  }
  
  int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
+ void i915_gem_reset(struct drm_i915_private *dev_priv);
  void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
  void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
- void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+ void i915_gem_init_mmio(struct drm_i915_private *i915);
  int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
  int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
  void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
  void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
- int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                                       unsigned int flags);
+ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+                          unsigned int flags);
  int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
  void i915_gem_resume(struct drm_i915_private *dev_priv);
 -int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 +int i915_gem_fault(struct vm_fault *vmf);
  int i915_gem_object_wait(struct drm_i915_gem_object *obj,
                         unsigned int flags,
                         long timeout,
@@@ -3528,10 -3592,12 +3598,10 @@@ u32 i915_gem_fence_alignment(struct drm
  /* i915_debugfs.c */
  #ifdef CONFIG_DEBUG_FS
  int i915_debugfs_register(struct drm_i915_private *dev_priv);
 -void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
  int i915_debugfs_connector_add(struct drm_connector *connector);
  void intel_display_crc_init(struct drm_i915_private *dev_priv);
  #else
  static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
 -static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
  static inline int i915_debugfs_connector_add(struct drm_connector *connector)
  { return 0; }
  static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
  __printf(2, 3)
  void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
  int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
-                           const struct i915_error_state_file_priv *error);
+                           const struct i915_gpu_state *gpu);
  int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
                              struct drm_i915_private *i915,
                              size_t count, loff_t pos);
@@@ -3552,13 -3618,28 +3622,28 @@@ static inline void i915_error_state_buf
  {
        kfree(eb->buf);
  }
+ struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
  void i915_capture_error_state(struct drm_i915_private *dev_priv,
                              u32 engine_mask,
                              const char *error_msg);
- void i915_error_state_get(struct drm_device *dev,
-                         struct i915_error_state_file_priv *error_priv);
- void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
- void i915_destroy_error_state(struct drm_i915_private *dev_priv);
+ static inline struct i915_gpu_state *
+ i915_gpu_state_get(struct i915_gpu_state *gpu)
+ {
+       kref_get(&gpu->ref);
+       return gpu;
+ }
+ void __i915_gpu_state_free(struct kref *kref);
+ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
+ {
+       if (gpu)
+               kref_put(&gpu->ref, __i915_gpu_state_free);
+ }
+ struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
+ void i915_reset_error_state(struct drm_i915_private *i915);
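
With the error capture renamed to i915_gpu_state and reference counted through the inline i915_gpu_state_get()/i915_gpu_state_put() pair above, a reader can hold a snapshot without pinning any lock. A hedged usage sketch built only from the declarations in this hunk; it assumes i915_first_error_state() hands back a held reference:

    /* Sketch only: format the first captured error state, then drop it. */
    static void dump_first_error(struct drm_i915_private *i915,
                                 struct drm_i915_error_state_buf *buf)
    {
            struct i915_gpu_state *gpu = i915_first_error_state(i915);

            if (!gpu)
                    return;         /* nothing captured since the last reset */

            i915_error_state_to_str(buf, gpu);
            i915_gpu_state_put(gpu);        /* kref_put -> __i915_gpu_state_free */
    }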
  
  #else
  
@@@ -3568,7 -3649,13 +3653,13 @@@ static inline void i915_capture_error_s
  {
  }
  
- static inline void i915_destroy_error_state(struct drm_i915_private *dev_priv)
+ static inline struct i915_gpu_state *
+ i915_first_error_state(struct drm_i915_private *i915)
+ {
+       return NULL;
+ }
+ static inline void i915_reset_error_state(struct drm_i915_private *i915)
  {
  }
  
@@@ -3601,14 -3688,6 +3692,14 @@@ extern int i915_restore_state(struct dr
  void i915_setup_sysfs(struct drm_i915_private *dev_priv);
  void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
  
 +/* intel_lpe_audio.c */
 +int  intel_lpe_audio_init(struct drm_i915_private *dev_priv);
 +void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
 +void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
 +void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
 +                          void *eld, int port, int pipe, int tmds_clk_speed,
 +                          bool dp_output, int link_rate);
 +
  /* intel_i2c.c */
  extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
  extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
@@@ -3708,7 -3787,7 +3799,7 @@@ extern void i915_redisable_vga(struct d
  extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
  extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
  extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
- extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+ extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
  extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                                  bool enable);
  
@@@ -3724,7 -3803,6 +3815,6 @@@ extern void intel_overlay_print_error_s
  extern struct intel_display_error_state *
  intel_display_capture_error_state(struct drm_i915_private *dev_priv);
  extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
-                                           struct drm_i915_private *dev_priv,
                                            struct intel_display_error_state *error);
  
  int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
@@@ -3734,7 -3812,7 +3824,7 @@@ int skl_pcode_request(struct drm_i915_p
  
  /* intel_sideband.c */
  u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
- void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
+ int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
  u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
  u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
  void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
@@@ -3953,14 -4031,34 +4043,34 @@@ wait_remaining_ms_from_jiffies(unsigne
  }
  
  static inline bool
- __i915_request_irq_complete(struct drm_i915_gem_request *req)
+ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
  {
        struct intel_engine_cs *engine = req->engine;
+       u32 seqno;
+       /* Note that the engine may have wrapped around the seqno, and
+        * so our request->global_seqno will be ahead of the hardware,
+        * even though it completed the request before wrapping. We catch
+        * this by kicking all the waiters before resetting the seqno
+        * in hardware, and also signal the fence.
+        */
+       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
+               return true;
+       /* The request was dequeued before we were awoken. We check after
+        * inspecting the hw to confirm that this was the same request
+        * that generated the HWS update. The memory barriers within
+        * the request execution are sufficient to ensure that a check
+        * after reading the value from hw matches this request.
+        */
+       seqno = i915_gem_request_global_seqno(req);
+       if (!seqno)
+               return false;
  
        /* Before we do the heavier coherent read of the seqno,
         * check the value (hopefully) in the CPU cacheline.
         */
-       if (__i915_gem_request_completed(req))
+       if (__i915_gem_request_completed(req, seqno))
                return true;
  
        /* Ensure our read of the seqno is coherent so that we
         * is woken.
         */
        if (engine->irq_seqno_barrier &&
-           rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
-           cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
-               struct task_struct *tsk;
+           test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
+               struct intel_breadcrumbs *b = &engine->breadcrumbs;
+               unsigned long flags;
  
                /* The ordering of irq_posted versus applying the barrier
                 * is crucial. The clearing of the current irq_posted must
                 * the seqno before we believe it coherent since they see
                 * irq_posted == false but we are still running).
                 */
-               rcu_read_lock();
-               tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
-               if (tsk && tsk != current)
+               spin_lock_irqsave(&b->irq_lock, flags);
+               if (b->irq_wait && b->irq_wait->tsk != current)
                        /* Note that if the bottom-half is changed as we
                         * are sending the wake-up, the new bottom-half will
                         * be woken by whomever made the change. We only have
                         * to worry about when we steal the irq-posted for
                         * ourself.
                         */
-                       wake_up_process(tsk);
-               rcu_read_unlock();
+                       wake_up_process(b->irq_wait->tsk);
+               spin_unlock_irqrestore(&b->irq_lock, flags);
  
-               if (__i915_gem_request_completed(req))
+               if (__i915_gem_request_completed(req, seqno))
                        return true;
        }
  
@@@ -4042,4 -4139,10 +4151,10 @@@ int remap_io_mapping(struct vm_area_str
                     unsigned long addr, unsigned long pfn, unsigned long size,
                     struct io_mapping *iomap);
  
+ static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj)
+ {
+       return (obj->cache_level != I915_CACHE_NONE ||
+               HAS_LLC(to_i915(obj->base.dev)));
+ }
  #endif
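
i915_gem_object_is_coherent() centralises what cpu_cache_is_coherent() used to compute per callsite: an object is coherent with the CPU cache if it is snooped (any cache level other than I915_CACHE_NONE) or the platform has a shared LLC, and a clflush is needed exactly when neither holds, as the i915_gem.c hunks below show. A standalone sketch of the same truth table:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the two inputs of the inline helper above. */
    static bool is_coherent(bool has_llc, bool snooped)
    {
            return snooped || has_llc;
    }

    int main(void)
    {
            /* A CPU write needs a clflush only when the object is not coherent. */
            for (int llc = 0; llc <= 1; llc++)
                    for (int snoop = 0; snoop <= 1; snoop++)
                            printf("llc=%d snoop=%d -> needs_clflush=%d\n",
                                   llc, snoop, !is_coherent(llc, snoop));
            return 0;
    }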
index 6908123162d17cd998c1e7f0bf54a27064e67588,f7e85fb1464f44613b1050e559a5e81d898b69fb..7c20601fe1de626e1bbf6a819e81fdec3c8d6ada
  #include <drm/drm_vma_manager.h>
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
+ #include "i915_gem_clflush.h"
  #include "i915_vgpu.h"
  #include "i915_trace.h"
  #include "intel_drv.h"
  #include "intel_frontbuffer.h"
  #include "intel_mocs.h"
  #include <linux/dma-fence-array.h>
+ #include <linux/kthread.h>
  #include <linux/reservation.h>
  #include <linux/shmem_fs.h>
  #include <linux/slab.h>
@@@ -47,18 -49,12 +49,12 @@@ static void i915_gem_flush_free_objects
  static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
  static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
  
- static bool cpu_cache_is_coherent(struct drm_device *dev,
-                                 enum i915_cache_level level)
- {
-       return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
- }
  static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
  {
        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
                return false;
  
-       if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+       if (!i915_gem_object_is_coherent(obj))
                return true;
  
        return obj->pin_display;
@@@ -254,7 -250,7 +250,7 @@@ __i915_gem_object_release_shmem(struct 
  
        if (needs_clflush &&
            (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
-           !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+           !i915_gem_object_is_coherent(obj))
                drm_clflush_sg(pages);
  
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@@ -312,6 -308,8 +308,8 @@@ static const struct drm_i915_gem_object
        .release = i915_gem_object_release_phys,
  };
  
+ static const struct drm_i915_gem_object_ops i915_gem_object_ops;
  int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
  {
        struct i915_vma *vma;
@@@ -399,7 -397,7 +397,7 @@@ out
        if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
                i915_gem_request_retire_upto(rq);
  
-       if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
+       if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) {
                /* The GPU is now idle and this client has stalled.
                 * Since no other client has submitted a request in the
                 * meantime, assume that this client is the only one
@@@ -424,7 -422,9 +422,9 @@@ i915_gem_object_wait_reservation(struc
                                 long timeout,
                                 struct intel_rps_client *rps)
  {
+       unsigned int seq = __read_seqcount_begin(&resv->seq);
        struct dma_fence *excl;
+       bool prune_fences = false;
  
        if (flags & I915_WAIT_ALL) {
                struct dma_fence **shared;
                for (; i < count; i++)
                        dma_fence_put(shared[i]);
                kfree(shared);
+               prune_fences = count && timeout >= 0;
        } else {
                excl = reservation_object_get_excl_rcu(resv);
        }
  
-       if (excl && timeout >= 0)
+       if (excl && timeout >= 0) {
                timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+               prune_fences = timeout >= 0;
+       }
  
        dma_fence_put(excl);
  
+       if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
+               reservation_object_lock(resv, NULL);
+               if (!__read_seqcount_retry(&resv->seq, seq))
+                       reservation_object_add_excl_fence(resv, NULL);
+               reservation_object_unlock(resv);
+       }
        return timeout;
  }
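
The new prune_fences path opportunistically clears completed fences from the reservation object, but only if the seqcount proves no writer touched it across the unlocked wait, and the check is repeated under the lock before the NULL exclusive fence is installed. A hedged standalone sketch of that optimistic-read, recheck-under-lock shape (generic names, not the reservation API):

    #include <stdbool.h>

    struct guarded {
            unsigned int seq;       /* bumped by writers, like a seqcount */
            int nfences;
    };

    static bool try_prune(struct guarded *g)
    {
            unsigned int seq = g->seq;      /* __read_seqcount_begin() analogue */

            /* ... the long, unlocked wait happens here ... */

            if (g->seq != seq)              /* __read_seqcount_retry() analogue */
                    return false;           /* a writer intervened: skip pruning */

            /* take the object lock, then recheck before mutating */
            if (g->seq != seq)
                    return false;
            g->nfences = 0;                 /* the prune itself */
            return true;                    /* drop the object lock */
    }

    int main(void)
    {
            struct guarded g = { .seq = 0, .nfences = 3 };
            return try_prune(&g) ? 0 : 1;
    }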
  
@@@ -585,9 -596,18 +596,18 @@@ i915_gem_object_attach_phys(struct drm_
        if (obj->mm.pages)
                return -EBUSY;
  
+       GEM_BUG_ON(obj->ops != &i915_gem_object_ops);
        obj->ops = &i915_gem_phys_ops;
  
-       return i915_gem_object_pin_pages(obj);
+       ret = i915_gem_object_pin_pages(obj);
+       if (ret)
+               goto err_xfer;
+       return 0;
+ err_xfer:
+       obj->ops = &i915_gem_object_ops;
+       return ret;
  }
  
  static int
@@@ -608,7 -628,7 +628,7 @@@ i915_gem_phys_pwrite(struct drm_i915_ge
        drm_clflush_virt_range(vaddr, args->size);
        i915_gem_chipset_flush(to_i915(obj->base.dev));
  
-       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+       intel_fb_obj_flush(obj, ORIGIN_CPU);
        return 0;
  }
  
@@@ -771,8 -791,7 +791,7 @@@ int i915_gem_obj_prepare_shmem_read(str
         * anyway again before the next pread happens.
         */
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
-               *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
-                                                       obj->cache_level);
+               *needs_clflush = !i915_gem_object_is_coherent(obj);
  
        if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
                ret = i915_gem_object_set_to_cpu_domain(obj, false);
@@@ -828,8 -847,7 +847,7 @@@ int i915_gem_obj_prepare_shmem_write(st
         * before writing.
         */
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
-               *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
-                                                        obj->cache_level);
+               *needs_clflush |= !i915_gem_object_is_coherent(obj);
  
        if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
                ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@@ -1257,7 -1275,7 +1275,7 @@@ i915_gem_gtt_pwrite_fast(struct drm_i91
                user_data += page_length;
                offset += page_length;
        }
-       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+       intel_fb_obj_flush(obj, ORIGIN_CPU);
  
        mutex_lock(&i915->drm.struct_mutex);
  out_unpin:
@@@ -1393,7 -1411,7 +1411,7 @@@ i915_gem_shmem_pwrite(struct drm_i915_g
                offset = 0;
        }
  
-       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+       intel_fb_obj_flush(obj, ORIGIN_CPU);
        i915_gem_obj_finish_shmem_access(obj);
        return ret;
  }
@@@ -1596,23 -1614,16 +1614,16 @@@ i915_gem_sw_finish_ioctl(struct drm_dev
  {
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;
-       int err = 0;
  
        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;
  
        /* Pinned buffers may be scanout, so flush the cache */
-       if (READ_ONCE(obj->pin_display)) {
-               err = i915_mutex_lock_interruptible(dev);
-               if (!err) {
-                       i915_gem_object_flush_cpu_write_domain(obj);
-                       mutex_unlock(&dev->struct_mutex);
-               }
-       }
+       i915_gem_object_flush_if_display(obj);
        i915_gem_object_put(obj);
-       return err;
+       return 0;
  }
  
  /**
@@@ -1772,6 -1783,7 +1783,6 @@@ compute_partial_view(struct drm_i915_ge
  
  /**
   * i915_gem_fault - fault a page into the GTT
 - * @area: CPU VMA in question
   * @vmf: fault info
   *
   * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
   * The current feature set supported by i915_gem_fault() and thus GTT mmaps
   * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
   */
 -int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 +int i915_gem_fault(struct vm_fault *vmf)
  {
  #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
 +      struct vm_area_struct *area = vmf->vma;
        struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@@ -2223,17 -2234,17 +2234,17 @@@ unlock
        mutex_unlock(&obj->mm.lock);
  }
  
- static void i915_sg_trim(struct sg_table *orig_st)
+ static bool i915_sg_trim(struct sg_table *orig_st)
  {
        struct sg_table new_st;
        struct scatterlist *sg, *new_sg;
        unsigned int i;
  
        if (orig_st->nents == orig_st->orig_nents)
-               return;
+               return false;
  
        if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
-               return;
+               return false;
  
        new_sg = new_st.sgl;
        for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
        sg_free_table(orig_st);
  
        *orig_st = new_st;
+       return true;
  }
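
i915_sg_trim() now reports whether it actually swapped in a compacted table, letting callers distinguish a successful trim from a no-op or a failed sg_alloc_table() (both of which return false and leave the original table untouched). A hedged caller sketch:

    /* Sketch only: act on the new return value of i915_sg_trim(). */
    static void maybe_trim(struct sg_table *st)
    {
            if (i915_sg_trim(st)) {
                    /* st now holds the compacted table; the old one was freed */
            } else {
                    /* nothing to trim, or allocation failed: st is unchanged */
            }
    }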
  
  static struct sg_table *
@@@ -2596,7 -2608,8 +2608,8 @@@ static void i915_gem_context_mark_innoc
  struct drm_i915_gem_request *
  i915_gem_find_active_request(struct intel_engine_cs *engine)
  {
-       struct drm_i915_gem_request *request;
+       struct drm_i915_gem_request *request, *active = NULL;
+       unsigned long flags;
  
        /* We are called by the error capture and reset at a random
         * point in time. In particular, note that neither is crucially
         * extra delay for a recent interrupt is pointless. Hence, we do
         * not need an engine->irq_seqno_barrier() before the seqno reads.
         */
+       spin_lock_irqsave(&engine->timeline->lock, flags);
        list_for_each_entry(request, &engine->timeline->requests, link) {
-               if (__i915_gem_request_completed(request))
+               if (__i915_gem_request_completed(request,
+                                                request->global_seqno))
                        continue;
  
                GEM_BUG_ON(request->engine != engine);
-               return request;
+               GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                                   &request->fence.flags));
+               active = request;
+               break;
        }
+       spin_unlock_irqrestore(&engine->timeline->lock, flags);
  
-       return NULL;
+       return active;
  }
  
  static bool engine_stalled(struct intel_engine_cs *engine)
@@@ -2641,7 -2661,30 +2661,30 @@@ int i915_gem_reset_prepare(struct drm_i
        for_each_engine(engine, dev_priv, id) {
                struct drm_i915_gem_request *request;
  
+               /* Prevent the signaler thread from updating the request
+                * state (by calling dma_fence_signal) as we are processing
+                * the reset. The write from the GPU of the seqno is
+                * asynchronous and the signaler thread may see a different
+                * value to us and declare the request complete, even though
+                * the reset routine has picked that request as the active
+                * (incomplete) request. This conflict is not handled
+                * gracefully!
+                */
+               kthread_park(engine->breadcrumbs.signaler);
+               /* Prevent request submission to the hardware until we have
+                * completed the reset in i915_gem_reset_finish(). If a request
+                * is completed by one engine, it may then queue a request
+                * to a second via its engine->irq_tasklet *just* as we are
+                * calling engine->init_hw() and also writing the ELSP.
+                * Turning off the engine->irq_tasklet until the reset is over
+                * prevents the race.
+                */
                tasklet_kill(&engine->irq_tasklet);
+               tasklet_disable(&engine->irq_tasklet);
+               if (engine->irq_seqno_barrier)
+                       engine->irq_seqno_barrier(engine);
  
                if (engine_stalled(engine)) {
                        request = i915_gem_find_active_request(engine);
@@@ -2739,9 -2782,6 +2782,6 @@@ static void i915_gem_reset_engine(struc
  {
        struct drm_i915_gem_request *request;
  
-       if (engine->irq_seqno_barrier)
-               engine->irq_seqno_barrier(engine);
        request = i915_gem_find_active_request(engine);
        if (request && i915_gem_reset_request(request)) {
                DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
        engine->reset_hw(engine, request);
  }
  
- void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+ void i915_gem_reset(struct drm_i915_private *dev_priv)
  {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
  
        i915_gem_retire_requests(dev_priv);
  
-       for_each_engine(engine, dev_priv, id)
+       for_each_engine(engine, dev_priv, id) {
+               struct i915_gem_context *ctx;
                i915_gem_reset_engine(engine);
+               ctx = fetch_and_zero(&engine->last_retired_context);
+               if (ctx)
+                       engine->context_unpin(engine, ctx);
+       }
  
        i915_gem_restore_fences(dev_priv);
  
        }
  }
  
+ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+ {
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       for_each_engine(engine, dev_priv, id) {
+               tasklet_enable(&engine->irq_tasklet);
+               kthread_unpark(engine->breadcrumbs.signaler);
+       }
+ }
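
i915_gem_reset_prepare() now parks each engine's signaler kthread and disables its irq_tasklet; i915_gem_reset_finish() above is the exact mirror, so the two must bracket the reset in strict pairs, with i915_gem_reset() replaying or nop-ing requests in between. A hedged sketch of the expected call ordering, not the driver's actual reset path, assuming struct_mutex is held as the lockdep assert demands:

    static void full_gpu_reset_sketch(struct drm_i915_private *i915)
    {
            int err;

            mutex_lock(&i915->drm.struct_mutex);

            err = i915_gem_reset_prepare(i915); /* park signalers, kill tasklets */
            /* ... the hardware reset (intel_gpu_reset()) happens here ... */
            i915_gem_reset(i915);               /* requeue innocents, nop the guilty */
            i915_gem_reset_finish(i915);        /* re-enable tasklets, unpark signalers */

            mutex_unlock(&i915->drm.struct_mutex);
            (void)err;                          /* error handling elided in this sketch */
    }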
  static void nop_submit_request(struct drm_i915_gem_request *request)
  {
        dma_fence_set_error(&request->fence, -EIO);
@@@ -2900,8 -2959,8 +2959,8 @@@ i915_gem_idle_work_handler(struct work_
         * new request is submitted.
         */
        wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
-                intel_execlists_idle(dev_priv), 10);
+                intel_engines_are_idle(dev_priv),
+                10);
        if (READ_ONCE(dev_priv->gt.active_requests))
                return;
  
        if (dev_priv->gt.active_requests)
                goto out_unlock;
  
-       if (wait_for(intel_execlists_idle(dev_priv), 10))
+       if (wait_for(intel_engines_are_idle(dev_priv), 10))
                DRM_ERROR("Timeout waiting for engines to idle\n");
  
-       for_each_engine(engine, dev_priv, id)
+       for_each_engine(engine, dev_priv, id) {
+               intel_engine_disarm_breadcrumbs(engine);
                i915_gem_batch_pool_fini(&engine->batch_pool);
+       }
  
        GEM_BUG_ON(!dev_priv->gt.awake);
        dev_priv->gt.awake = false;
@@@ -3029,6 -3090,16 +3090,16 @@@ i915_gem_wait_ioctl(struct drm_device *
                args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
                if (args->timeout_ns < 0)
                        args->timeout_ns = 0;
+               /*
+                * Apparently ktime isn't accurate enough and occasionally has a
+                * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+                * things up to make the test happy. We allow up to 1 jiffy.
+                *
+                * This is a regression from the timespec->ktime conversion.
+                */
+               if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+                       args->timeout_ns = 0;
        }
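
The extra clamp absorbs up to one jiffy of jiffies<->nsecs<->ktime rounding: if a wait returns -ETIME but the leftover timeout rounds down to zero jiffies, the reported remainder is forced to 0 so userspace does not see a stale sub-jiffy residue. Illustrative arithmetic only, assuming HZ=100 (a 10 ms jiffy):

    #include <stdio.h>

    #define HZ 100                          /* assumption for the example */
    #define NSEC_PER_SEC 1000000000ULL

    static unsigned long ns_to_jiffies(unsigned long long ns)
    {
            return (unsigned long)(ns * HZ / NSEC_PER_SEC);     /* rounds down */
    }

    int main(void)
    {
            /* 5 ms of residue is under one jiffy, so it rounds to 0 and the
             * ioctl reports timeout_ns = 0 instead of a stale remainder. */
            printf("%lu\n", ns_to_jiffies(5000000));    /* -> 0 */
            printf("%lu\n", ns_to_jiffies(15000000));   /* -> 1 */
            return 0;
    }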
  
        i915_gem_object_put(obj);
@@@ -3071,41 -3142,6 +3142,6 @@@ int i915_gem_wait_for_idle(struct drm_i
        return 0;
  }
  
- void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
-                            bool force)
- {
-       /* If we don't have a page list set up, then we're not pinned
-        * to GPU, and we can ignore the cache flush because it'll happen
-        * again at bind time.
-        */
-       if (!obj->mm.pages)
-               return;
-       /*
-        * Stolen memory is always coherent with the GPU as it is explicitly
-        * marked as wc by the system, or the system is cache-coherent.
-        */
-       if (obj->stolen || obj->phys_handle)
-               return;
-       /* If the GPU is snooping the contents of the CPU cache,
-        * we do not need to manually clear the CPU cache lines.  However,
-        * the caches are only snooped when the render cache is
-        * flushed/invalidated.  As we always have to emit invalidations
-        * and flushes when moving into and out of the RENDER domain, correct
-        * snooping behaviour occurs naturally as the result of our domain
-        * tracking.
-        */
-       if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
-               obj->cache_dirty = true;
-               return;
-       }
-       trace_i915_gem_object_clflush(obj);
-       drm_clflush_sg(obj->mm.pages);
-       obj->cache_dirty = false;
- }
  /** Flushes the GTT write domain for the object if it's dirty. */
  static void
  i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
        if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
                POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
  
-       intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
+       intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT));
  
        obj->base.write_domain = 0;
-       trace_i915_gem_object_change_domain(obj,
-                                           obj->base.read_domains,
-                                           I915_GEM_DOMAIN_GTT);
  }
  
  /** Flushes the CPU write domain for the object if it's dirty. */
@@@ -3149,13 -3182,27 +3182,27 @@@ i915_gem_object_flush_cpu_write_domain(
        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
                return;
  
-       i915_gem_clflush_object(obj, obj->pin_display);
-       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+       i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+       obj->base.write_domain = 0;
+ }
+ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
+ {
+       if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty)
+               return;
  
+       i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
        obj->base.write_domain = 0;
-       trace_i915_gem_object_change_domain(obj,
-                                           obj->base.read_domains,
-                                           I915_GEM_DOMAIN_CPU);
+ }
+ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
+ {
+       if (!READ_ONCE(obj->pin_display))
+               return;
+       mutex_lock(&obj->base.dev->struct_mutex);
+       __i915_gem_object_flush_for_display(obj);
+       mutex_unlock(&obj->base.dev->struct_mutex);
  }
  
  /**
  int
  i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
  {
-       uint32_t old_write_domain, old_read_domains;
        int ret;
  
        lockdep_assert_held(&obj->base.dev->struct_mutex);
        if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
                mb();
  
-       old_write_domain = obj->base.write_domain;
-       old_read_domains = obj->base.read_domains;
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
                obj->mm.dirty = true;
        }
  
-       trace_i915_gem_object_change_domain(obj,
-                                           old_read_domains,
-                                           old_write_domain);
        i915_gem_object_unpin_pages(obj);
        return 0;
  }
@@@ -3349,7 -3388,7 +3388,7 @@@ restart
        }
  
        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
-           cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+           i915_gem_object_is_coherent(obj))
                obj->cache_dirty = true;
  
        list_for_each_entry(vma, &obj->vma_list, obj_link)
@@@ -3461,7 -3500,6 +3500,6 @@@ i915_gem_object_pin_to_display_plane(st
                                     const struct i915_ggtt_view *view)
  {
        struct i915_vma *vma;
-       u32 old_read_domains, old_write_domain;
        int ret;
  
        lockdep_assert_held(&obj->base.dev->struct_mutex);
        vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
  
        /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
-       if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-               i915_gem_clflush_object(obj, true);
-               intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
-       }
-       old_write_domain = obj->base.write_domain;
-       old_read_domains = obj->base.read_domains;
+       __i915_gem_object_flush_for_display(obj);
+       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
  
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
-       obj->base.write_domain = 0;
        obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
  
-       trace_i915_gem_object_change_domain(obj,
-                                           old_read_domains,
-                                           old_write_domain);
        return vma;
  
  err_unpin_display:
@@@ -3574,7 -3602,6 +3602,6 @@@ i915_gem_object_unpin_from_display_plan
  int
  i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
  {
-       uint32_t old_write_domain, old_read_domains;
        int ret;
  
        lockdep_assert_held(&obj->base.dev->struct_mutex);
  
        i915_gem_object_flush_gtt_write_domain(obj);
  
-       old_write_domain = obj->base.write_domain;
-       old_read_domains = obj->base.read_domains;
        /* Flush the CPU cache if it's still invalid. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
-               i915_gem_clflush_object(obj, false);
+               i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
                obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
        }
  
                obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
  
-       trace_i915_gem_object_change_domain(obj,
-                                           old_read_domains,
-                                           old_write_domain);
        return 0;
  }
  
@@@ -3647,16 -3666,14 +3666,14 @@@ i915_gem_ring_throttle(struct drm_devic
                return -EIO;
  
        spin_lock(&file_priv->mm.lock);
-       list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+       list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
  
-               /*
-                * Note that the request might not have been submitted yet.
-                * In which case emitted_jiffies will be zero.
-                */
-               if (!request->emitted_jiffies)
-                       continue;
+               if (target) {
+                       list_del(&target->client_link);
+                       target->file_priv = NULL;
+               }
  
                target = request;
        }
@@@ -3942,7 -3959,7 +3959,7 @@@ frontbuffer_retire(struct i915_gem_acti
        struct drm_i915_gem_object *obj =
                container_of(active, typeof(*obj), frontbuffer_write);
  
-       intel_fb_obj_flush(obj, true, ORIGIN_CS);
+       intel_fb_obj_flush(obj, ORIGIN_CS);
  }
  
  void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@@ -4203,11 -4220,29 +4220,29 @@@ static void assert_kernel_context_is_cu
                           !i915_gem_context_is_kernel(engine->last_retired_context));
  }
  
+ void i915_gem_sanitize(struct drm_i915_private *i915)
+ {
+       /*
+        * If we inherit context state from the BIOS or earlier occupants
+        * of the GPU, the GPU may be in an inconsistent state when we
+        * try to take over. The only way to remove the earlier state
+        * is by resetting. However, resetting on earlier gen is tricky as
+        * it may impact the display and we are uncertain about the stability
+        * of the reset, so we only reset recent machines with logical
+        * context support (that must be reset to remove any stray contexts).
+        */
+       if (HAS_HW_CONTEXTS(i915)) {
+               int reset = intel_gpu_reset(i915, ALL_ENGINES);
+               WARN_ON(reset && reset != -ENODEV);
+       }
+ }
  int i915_gem_suspend(struct drm_i915_private *dev_priv)
  {
        struct drm_device *dev = &dev_priv->drm;
        int ret;
  
+       intel_runtime_pm_get(dev_priv);
        intel_suspend_gt_powersave(dev_priv);
  
        mutex_lock(&dev->struct_mutex);
         */
        ret = i915_gem_switch_to_kernel_context(dev_priv);
        if (ret)
-               goto err;
+               goto err_unlock;
  
        ret = i915_gem_wait_for_idle(dev_priv,
                                     I915_WAIT_INTERRUPTIBLE |
                                     I915_WAIT_LOCKED);
        if (ret)
-               goto err;
+               goto err_unlock;
  
        i915_gem_retire_requests(dev_priv);
        GEM_BUG_ON(dev_priv->gt.active_requests);
         * reset the GPU back to its idle, low power state.
         */
        WARN_ON(dev_priv->gt.awake);
-       WARN_ON(!intel_execlists_idle(dev_priv));
+       WARN_ON(!intel_engines_are_idle(dev_priv));
  
        /*
         * Neither the BIOS, ourselves or any other kernel
         * machines is a good idea, we don't - just in case it leaves the
         * machine in an unusable condition.
         */
-       if (HAS_HW_CONTEXTS(dev_priv)) {
-               int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
-               WARN_ON(reset && reset != -ENODEV);
-       }
-       return 0;
+       i915_gem_sanitize(dev_priv);
+       goto out_rpm_put;
  
- err:
+ err_unlock:
        mutex_unlock(&dev->struct_mutex);
+ out_rpm_put:
+       intel_runtime_pm_put(dev_priv);
        return ret;
  }
  
@@@ -4351,11 -4384,24 +4384,24 @@@ static void init_unused_rings(struct dr
        }
  }
  
- int
- i915_gem_init_hw(struct drm_i915_private *dev_priv)
+ static int __i915_gem_restart_engines(void *data)
  {
+       struct drm_i915_private *i915 = data;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       int err;
+       for_each_engine(engine, i915, id) {
+               err = engine->init_hw(engine);
+               if (err)
+                       return err;
+       }
+       return 0;
+ }
+ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
+ {
        int ret;
  
        dev_priv->gt.last_init_time = ktime_get();
        }
  
        /* Need to do basic initialisation of all rings first: */
-       for_each_engine(engine, dev_priv, id) {
-               ret = engine->init_hw(engine);
-               if (ret)
-                       goto out;
-       }
+       ret = __i915_gem_restart_engines(dev_priv);
+       if (ret)
+               goto out;
  
        intel_mocs_init_l3cc_table(dev_priv);
  
@@@ -4446,6 -4490,8 +4490,8 @@@ int i915_gem_init(struct drm_i915_priva
  
        mutex_lock(&dev_priv->drm.struct_mutex);
  
+       i915_gem_clflush_init(dev_priv);
        if (!i915.enable_execlists) {
                dev_priv->gt.resume = intel_legacy_submission_resume;
                dev_priv->gt.cleanup_engine = intel_engine_cleanup;
@@@ -4494,6 -4540,11 +4540,11 @@@ out_unlock
        return ret;
  }
  
+ void i915_gem_init_mmio(struct drm_i915_private *i915)
+ {
+       i915_gem_sanitize(i915);
+ }
  void
  i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
  {
@@@ -4583,8 -4634,6 +4634,6 @@@ i915_gem_load_init(struct drm_i915_priv
        init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
  
-       dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
        init_waitqueue_head(&dev_priv->pending_flip_queue);
  
        dev_priv->mm.interruptible = true;
@@@ -4609,7 -4658,9 +4658,9 @@@ err_out
  
  void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
  {
+       i915_gem_drain_freed_objects(dev_priv);
        WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+       WARN_ON(dev_priv->mm.object_count);
  
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
  
  int i915_gem_freeze(struct drm_i915_private *dev_priv)
  {
-       intel_runtime_pm_get(dev_priv);
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_gem_shrink_all(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
  
-       intel_runtime_pm_put(dev_priv);
        return 0;
  }
  
@@@ -4685,7 -4732,7 +4732,7 @@@ void i915_gem_release(struct drm_devic
         * file_priv.
         */
        spin_lock(&file_priv->mm.lock);
-       list_for_each_entry(request, &file_priv->mm.request_list, client_list)
+       list_for_each_entry(request, &file_priv->mm.request_list, client_link)
                request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
  
@@@ -4949,3 -4996,11 +4996,11 @@@ i915_gem_object_get_dma_address(struct 
        sg = i915_gem_object_get_sg(obj, n, &offset);
        return sg_dma_address(sg) + (offset << PAGE_SHIFT);
  }
+ #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ #include "selftests/scatterlist.c"
+ #include "selftests/mock_gem_device.c"
+ #include "selftests/huge_gem_object.c"
+ #include "selftests/i915_gem_object.c"
+ #include "selftests/i915_gem_coherency.c"
+ #endif
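
The selftests arrive as direct #includes of .c files at the bottom of the unit they exercise, compiled only under CONFIG_DRM_I915_SELFTEST; sharing the translation unit is what lets the tests call static functions. A minimal sketch of the same pattern with hypothetical file names:

    /* foo.c */
    static int foo_add(int a, int b)        /* static: invisible to other units */
    {
            return a + b;
    }

    #if IS_ENABLED(CONFIG_FOO_SELFTEST)
    #include "selftests/foo_selftest.c"     /* the test gains access to foo_add() */
    #endif

    /* selftests/foo_selftest.c */
    static int foo_selftest(void)
    {
            return foo_add(2, 2) == 4 ? 0 : -EINVAL;
    }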
index 29bb8011dbc4a0860828fa896dda34da26e18b54,3e276eee0450ad506f0e77b6cbb7b007896602a5..11898cd97596a33965a54c2bf459dcc7d12608dd
@@@ -141,7 -141,7 +141,7 @@@ static int i915_gem_dmabuf_mmap(struct 
        if (!obj->base.filp)
                return -ENODEV;
  
 -      ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
 +      ret = call_mmap(obj->base.filp, vma);
        if (ret)
                return ret;
  
@@@ -307,3 -307,8 +307,8 @@@ fail_detach
  
        return ERR_PTR(ret);
  }
+ #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ #include "selftests/mock_dmabuf.c"
+ #include "selftests/i915_gem_dmabuf.c"
+ #endif
index bf90b07163d1266a6bb0c87f036e84fa78181991,1495eeb033824d3c2609744258bc6c5a4a51a287..33b0dc4782a9de1c3a6352384dddeea0f4dfaf0b
@@@ -33,6 -33,8 +33,8 @@@
  
  #include <drm/i915_drm.h>
  
+ #include "i915_selftest.h"
  struct drm_i915_gem_object_ops {
        unsigned int flags;
  #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
@@@ -84,6 -86,7 +86,7 @@@ struct drm_i915_gem_object 
        struct list_head obj_exec_link;
  
        struct list_head batch_pool_link;
+       I915_SELFTEST_DECLARE(struct list_head st_link);
  
        unsigned long flags;
  
        struct reservation_object *resv;
  
        /** References from framebuffers, locks out tiling changes. */
-       unsigned long framebuffer_references;
+       unsigned int framebuffer_references;
  
        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;
  
-       struct i915_gem_userptr {
-               uintptr_t ptr;
-               unsigned read_only :1;
+       union {
+               struct i915_gem_userptr {
+                       uintptr_t ptr;
+                       unsigned read_only :1;
+                       struct i915_mm_struct *mm;
+                       struct i915_mmu_object *mmu_object;
+                       struct work_struct *work;
+               } userptr;
  
-               struct i915_mm_struct *mm;
-               struct i915_mmu_object *mmu_object;
-               struct work_struct *work;
-       } userptr;
+               unsigned long scratch;
+       };
  
        /** for phys allocated objects */
        struct drm_dma_handle *phys_handle;
@@@ -253,10 -260,20 +260,20 @@@ extern void drm_gem_object_unreference(
  __deprecated
  extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
  
+ static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
+ {
+       reservation_object_lock(obj->resv, NULL);
+ }
+ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
+ {
+       reservation_object_unlock(obj->resv);
+ }
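
i915_gem_object_lock()/i915_gem_object_unlock() wrap reservation_object_lock() on the object's resv so callers stop open-coding the reservation API. A hedged usage sketch using only the wrappers declared above:

    /* Sketch only: serialise against other fence users of this object. */
    static void touch_object(struct drm_i915_gem_object *obj)
    {
            i915_gem_object_lock(obj);
            /* ... inspect or update fences in obj->resv under the lock ... */
            i915_gem_object_unlock(obj);
    }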
  static inline bool
  i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
  {
 -      return atomic_read(&obj->base.refcount.refcount) == 0;
 +      return kref_read(&obj->base.refcount) == 0;
  }
  
  static inline bool
@@@ -299,6 -316,12 +316,12 @@@ i915_gem_object_clear_active_reference(
  
  void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
  
+ static inline bool
+ i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
+ {
+       return READ_ONCE(obj->framebuffer_references);
+ }
  static inline unsigned int
  i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
  {
@@@ -357,5 -380,7 +380,7 @@@ i915_gem_object_last_write_engine(struc
        return engine;
  }
  
+ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
  #endif
  
index e7c3c0318ff60f2bf60b3c5afce405d11ce54a5c,b36a7644e0555bfab83a35ef84132fa59aa18ef7..1e1d9f2072cd5d0e85d63d0de68f88ecb2e0c92d
@@@ -24,9 -24,6 +24,9 @@@
  
  #include <linux/prefetch.h>
  #include <linux/dma-fence-array.h>
 +#include <linux/sched.h>
 +#include <linux/sched/clock.h>
 +#include <linux/sched/signal.h>
  
  #include "i915_drv.h"
  
@@@ -72,7 -69,6 +72,6 @@@ static void i915_fence_release(struct d
         * caught trying to reuse dead objects.
         */
        i915_sw_fence_fini(&req->submit);
-       i915_sw_fence_fini(&req->execute);
  
        kmem_cache_free(req->i915->requests, req);
  }
@@@ -86,42 -82,20 +85,20 @@@ const struct dma_fence_ops i915_fence_o
        .release = i915_fence_release,
  };
  
- int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-                                  struct drm_file *file)
- {
-       struct drm_i915_private *dev_private;
-       struct drm_i915_file_private *file_priv;
-       WARN_ON(!req || !file || req->file_priv);
-       if (!req || !file)
-               return -EINVAL;
-       if (req->file_priv)
-               return -EINVAL;
-       dev_private = req->i915;
-       file_priv = file->driver_priv;
-       spin_lock(&file_priv->mm.lock);
-       req->file_priv = file_priv;
-       list_add_tail(&req->client_list, &file_priv->mm.request_list);
-       spin_unlock(&file_priv->mm.lock);
-       return 0;
- }
  static inline void
  i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
  {
-       struct drm_i915_file_private *file_priv = request->file_priv;
+       struct drm_i915_file_private *file_priv;
  
+       file_priv = request->file_priv;
        if (!file_priv)
                return;
  
        spin_lock(&file_priv->mm.lock);
-       list_del(&request->client_list);
-       request->file_priv = NULL;
+       if (request->file_priv) {
+               list_del(&request->client_link);
+               request->file_priv = NULL;
+       }
        spin_unlock(&file_priv->mm.lock);
  }
  
@@@ -201,6 -175,92 +178,92 @@@ i915_priotree_init(struct i915_priotre
        pt->priority = INT_MIN;
  }
  
+ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
+ {
+       struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       int ret;
+       /* Carefully retire all requests without writing to the rings */
+       ret = i915_gem_wait_for_idle(i915,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED);
+       if (ret)
+               return ret;
+       i915_gem_retire_requests(i915);
+       GEM_BUG_ON(i915->gt.active_requests > 1);
+       /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+       for_each_engine(engine, i915, id) {
+               struct intel_timeline *tl = &timeline->engine[id];
+               if (wait_for(intel_engine_is_idle(engine), 50))
+                       return -EBUSY;
+               if (!i915_seqno_passed(seqno, tl->seqno)) {
+                       /* spin until threads are complete */
+                       while (intel_breadcrumbs_busy(engine))
+                               cond_resched();
+               }
+               /* Finally reset hw state */
+               tl->seqno = seqno;
+               intel_engine_init_global_seqno(engine, seqno);
+       }
+       list_for_each_entry(timeline, &i915->gt.timelines, link) {
+               for_each_engine(engine, i915, id) {
+                       struct intel_timeline *tl = &timeline->engine[id];
+                       memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
+               }
+       }
+       return 0;
+ }
+ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
+ {
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       if (seqno == 0)
+               return -EINVAL;
+       /* HWS page needs to be set less than what we
+        * will inject to ring
+        */
+       return reset_all_global_seqno(dev_priv, seqno - 1);
+ }
+ static int reserve_seqno(struct intel_engine_cs *engine)
+ {
+       u32 active = ++engine->timeline->inflight_seqnos;
+       u32 seqno = engine->timeline->seqno;
+       int ret;
+       /* Reservation is fine until we need to wrap around */
+       if (likely(!add_overflows(seqno, active)))
+               return 0;
+       ret = reset_all_global_seqno(engine->i915, 0);
+       if (ret) {
+               engine->timeline->inflight_seqnos--;
+               return ret;
+       }
+       return 0;
+ }
+ static void unreserve_seqno(struct intel_engine_cs *engine)
+ {
+       GEM_BUG_ON(!engine->timeline->inflight_seqnos);
+       engine->timeline->inflight_seqnos--;
+ }
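
reserve_seqno() charges one inflight seqno per request against the engine timeline and, if handing out the next seqno could wrap the 32-bit counter into a still-inflight range, first rewinds everything to zero through reset_all_global_seqno(). add_overflows() is used here as the standard unsigned wrap test; the implementation below is an assumption, not taken from this diff:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed implementation of the helper used by reserve_seqno(). */
    static bool add_overflows(uint32_t a, uint32_t b)
    {
            return a + b < a;       /* true iff the 32-bit sum wrapped */
    }

    int main(void)
    {
            assert(!add_overflows(100, 7));             /* plenty of headroom */
            assert(add_overflows(UINT32_MAX - 3, 8));   /* would wrap: reset first */
            return 0;
    }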
  void i915_gem_retire_noop(struct i915_gem_active *active,
                          struct drm_i915_gem_request *request)
  {
@@@ -214,7 -274,6 +277,6 @@@ static void i915_gem_request_retire(str
  
        lockdep_assert_held(&request->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
-       GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
        GEM_BUG_ON(!i915_gem_request_completed(request));
        GEM_BUG_ON(!request->i915->gt.active_requests);
  
                                 &request->i915->gt.idle_work,
                                 msecs_to_jiffies(100));
        }
+       unreserve_seqno(request->engine);
  
        /* Walk through the active list, calling retire on each. This allows
         * objects to track their GPU activity and mark themselves as idle
@@@ -310,88 -370,9 +373,9 @@@ void i915_gem_request_retire_upto(struc
        } while (tmp != req);
  }
  
- static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
- {
-       struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int ret;
-       /* Carefully retire all requests without writing to the rings */
-       ret = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_INTERRUPTIBLE |
-                                    I915_WAIT_LOCKED);
-       if (ret)
-               return ret;
-       i915_gem_retire_requests(i915);
-       GEM_BUG_ON(i915->gt.active_requests > 1);
-       /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-       if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
-               while (intel_breadcrumbs_busy(i915))
-                       cond_resched(); /* spin until threads are complete */
-       }
-       atomic_set(&timeline->seqno, seqno);
-       /* Finally reset hw state */
-       for_each_engine(engine, i915, id)
-               intel_engine_init_global_seqno(engine, seqno);
-       list_for_each_entry(timeline, &i915->gt.timelines, link) {
-               for_each_engine(engine, i915, id) {
-                       struct intel_timeline *tl = &timeline->engine[id];
-                       memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
-               }
-       }
-       return 0;
- }
- int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
+ static u32 timeline_get_seqno(struct intel_timeline *tl)
  {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-       if (seqno == 0)
-               return -EINVAL;
-       /* HWS page needs to be set less than what we
-        * will inject to ring
-        */
-       return i915_gem_init_global_seqno(dev_priv, seqno - 1);
- }
- static int reserve_global_seqno(struct drm_i915_private *i915)
- {
-       u32 active_requests = ++i915->gt.active_requests;
-       u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
-       int ret;
-       /* Reservation is fine until we need to wrap around */
-       if (likely(seqno + active_requests > seqno))
-               return 0;
-       ret = i915_gem_init_global_seqno(i915, 0);
-       if (ret) {
-               i915->gt.active_requests--;
-               return ret;
-       }
-       return 0;
- }
- static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
- {
-       /* seqno only incremented under a mutex */
-       return ++tl->seqno.counter;
- }
- static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
- {
-       return atomic_inc_return(&tl->seqno);
+       return ++tl->seqno;
  }
  
  void __i915_gem_request_submit(struct drm_i915_gem_request *request)
        struct intel_timeline *timeline;
        u32 seqno;
  
+       GEM_BUG_ON(!irqs_disabled());
+       lockdep_assert_held(&engine->timeline->lock);
+       trace_i915_gem_request_execute(request);
        /* Transfer from per-context onto the global per-engine timeline */
        timeline = engine->timeline;
        GEM_BUG_ON(timeline == request->timeline);
-       assert_spin_locked(&timeline->lock);
  
-       seqno = timeline_get_seqno(timeline->common);
+       seqno = timeline_get_seqno(timeline);
        GEM_BUG_ON(!seqno);
        GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
  
-       GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
-       request->previous_seqno = timeline->last_submitted_seqno;
-       timeline->last_submitted_seqno = seqno;
        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
        request->global_seqno = seqno;
                intel_engine_enable_signaling(request);
        spin_unlock(&request->lock);
  
-       GEM_BUG_ON(!request->global_seqno);
        engine->emit_breadcrumb(request,
                                request->ring->vaddr + request->postfix);
  
        list_move_tail(&request->link, &timeline->requests);
        spin_unlock(&request->timeline->lock);
  
-       i915_sw_fence_commit(&request->execute);
+       wake_up_all(&request->execute);
  }
  
  void i915_gem_request_submit(struct drm_i915_gem_request *request)
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
  }
  
- static int __i915_sw_fence_call
- submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
  {
-       struct drm_i915_gem_request *request =
-               container_of(fence, typeof(*request), submit);
+       struct intel_engine_cs *engine = request->engine;
+       struct intel_timeline *timeline;
  
-       switch (state) {
-       case FENCE_COMPLETE:
-               request->engine->submit_request(request);
-               break;
+       GEM_BUG_ON(!irqs_disabled());
+       lockdep_assert_held(&engine->timeline->lock);
  
-       case FENCE_FREE:
-               i915_gem_request_put(request);
-               break;
-       }
+       /* Only unwind in reverse order, required so that the per-context list
+        * is kept in seqno/ring order.
+        */
+       GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
+       engine->timeline->seqno--;
  
-       return NOTIFY_DONE;
+       /* We may be recursing from the signal callback of another i915 fence */
+       spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+       request->global_seqno = 0;
+       if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+               intel_engine_cancel_signaling(request);
+       spin_unlock(&request->lock);
+ 
+       /* Transfer back from the global per-engine timeline to per-context */
+       timeline = request->timeline;
+       GEM_BUG_ON(timeline == engine->timeline);
+       spin_lock(&timeline->lock);
+       list_move(&request->link, &timeline->requests);
+       spin_unlock(&timeline->lock);
+ 
+       /* We don't need to wake_up any waiters on request->execute, they
+        * will get woken by any other event or us re-adding this request
+        * to the engine timeline (__i915_gem_request_submit()). The waiters
+        * should be quite adept at finding that the request now has a new
+        * global_seqno compared to the one they went to sleep on.
+        */
+ }
+ 
+ void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+ {
+       struct intel_engine_cs *engine = request->engine;
+       unsigned long flags;
+ 
+       /* Will be called from irq-context when using foreign fences. */
+       spin_lock_irqsave(&engine->timeline->lock, flags);
+       __i915_gem_request_unsubmit(request);
+       spin_unlock_irqrestore(&engine->timeline->lock, flags);
  }
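
Unsubmit is the exact inverse of submit, and the GEM_BUG_ON() above enforces that requests are unwound strictly in reverse submission order: a request may only be unsubmitted while it still holds the timeline's newest global seqno. A minimal standalone model of that LIFO assign/unwind invariant (hypothetical userspace C, names are illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t timeline_seqno;     /* models engine->timeline->seqno */

    static uint32_t submit(void)
    {
            return ++timeline_seqno;    /* hand out the next global seqno */
    }

    static void unsubmit(uint32_t global_seqno)
    {
            /* Only the most recently submitted request may be unwound. */
            assert(global_seqno == timeline_seqno);
            timeline_seqno--;
    }

    int main(void)
    {
            uint32_t a = submit();      /* 1 */
            uint32_t b = submit();      /* 2 */

            unsubmit(b);                /* fine: b holds the newest seqno */
            unsubmit(a);                /* fine: a is now the newest */
            printf("unwound to %u\n", timeline_seqno);  /* 0 */
            return 0;
    }
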
  
  static int __i915_sw_fence_call
execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
  {
        struct drm_i915_gem_request *request =
-               container_of(fence, typeof(*request), execute);
+               container_of(fence, typeof(*request), submit);
  
        switch (state) {
        case FENCE_COMPLETE:
+               trace_i915_gem_request_submit(request);
+               request->engine->submit_request(request);
                break;
  
        case FENCE_FREE:
@@@ -517,14 -530,14 +533,14 @@@ i915_gem_request_alloc(struct intel_eng
        if (ret)
                return ERR_PTR(ret);
  
-       ret = reserve_global_seqno(dev_priv);
+       ret = reserve_seqno(engine);
        if (ret)
                goto err_unpin;
  
        /* Move the oldest request to the slab-cache (if not in use!) */
        req = list_first_entry_or_null(&engine->timeline->requests,
                                       typeof(*req), link);
-       if (req && __i915_gem_request_completed(req))
+       if (req && i915_gem_request_completed(req))
                i915_gem_request_retire(req);
  
        /* Beware: Dragons be flying overhead.
                       &i915_fence_ops,
                       &req->lock,
                       req->timeline->fence_context,
-                      __timeline_get_seqno(req->timeline->common));
+                      timeline_get_seqno(req->timeline));
  
        /* We bump the ref for the fence chain */
        i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
-       i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);
-       /* Ensure that the execute fence completes after the submit fence -
-        * as we complete the execute fence from within the submit fence
-        * callback, its completion would otherwise be visible first.
-        */
-       i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
+       init_waitqueue_head(&req->execute);
  
        i915_priotree_init(&req->priotree);
  
         */
        req->head = req->ring->tail;
  
+       /* Check that we didn't interrupt ourselves with a new request */
+       GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
        return req;
  
  err_ctx:
  
        kmem_cache_free(dev_priv->requests, req);
  err_unreserve:
-       dev_priv->gt.active_requests--;
+       unreserve_seqno(engine);
  err_unpin:
        engine->context_unpin(engine, ctx);
        return ERR_PTR(ret);
@@@ -634,6 -643,7 +646,7 @@@ static in
  i915_gem_request_await_request(struct drm_i915_gem_request *to,
                               struct drm_i915_gem_request *from)
  {
+       u32 seqno;
        int ret;
  
        GEM_BUG_ON(to == from);
                return ret < 0 ? ret : 0;
        }
  
-       if (!from->global_seqno) {
+       seqno = i915_gem_request_global_seqno(from);
+       if (!seqno) {
                ret = i915_sw_fence_await_dma_fence(&to->submit,
                                                    &from->fence, 0,
                                                    GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }
  
-       if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
+       if (seqno <= to->timeline->sync_seqno[from->engine->id])
                return 0;
  
        trace_i915_gem_ring_sync_to(to, from);
                        return ret;
        }
  
-       to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
+       to->timeline->sync_seqno[from->engine->id] = seqno;
        return 0;
  }
  
@@@ -827,6 -838,7 +841,7 @@@ void __i915_add_request(struct drm_i915
        struct intel_ring *ring = request->ring;
        struct intel_timeline *timeline = request->timeline;
        struct drm_i915_gem_request *prev;
+       u32 *cs;
        int err;
  
        lockdep_assert_held(&request->i915->drm.struct_mutex);
         * our i915_gem_request_alloc() and called __i915_add_request() before
         * us, the timeline will hold its seqno which is later than ours.
         */
-       GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
-                                    request->fence.seqno));
+       GEM_BUG_ON(timeline->seqno != request->fence.seqno);
  
        /*
         * To ensure that this call will not fail, space for its emissions
         * GPU processing the request, we never over-estimate the
         * position of the ring's HEAD.
         */
-       err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
-       GEM_BUG_ON(err);
-       request->postfix = ring->tail;
-       ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
+       cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+       GEM_BUG_ON(IS_ERR(cs));
+       request->postfix = intel_ring_offset(request, cs);
  
        /* Seal the request and mark it as pending execution. Note that
         * we may inspect this state, without holding any locks, during
        list_add_tail(&request->link, &timeline->requests);
        spin_unlock_irq(&timeline->lock);
  
-       GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
-                                    request->fence.seqno));
-       timeline->last_submitted_seqno = request->fence.seqno;
+       GEM_BUG_ON(timeline->seqno != request->fence.seqno);
        i915_gem_active_set(&timeline->last_request, request);
  
        list_add_tail(&request->ring_link, &ring->request_list);
        request->emitted_jiffies = jiffies;
  
-       i915_gem_mark_busy(engine);
+       if (!request->i915->gt.active_requests++)
+               i915_gem_mark_busy(engine);
  
        /* Let the backend know a new request has arrived that may need
         * to adjust the existing execution schedule due to a high priority
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
  }
  
- static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
- {
-       unsigned long flags;
-       spin_lock_irqsave(&q->lock, flags);
-       if (list_empty(&wait->task_list))
-               __add_wait_queue(q, wait);
-       spin_unlock_irqrestore(&q->lock, flags);
- }
  static unsigned long local_clock_us(unsigned int *cpu)
  {
        unsigned long t;
@@@ -964,9 -962,10 +965,10 @@@ static bool busywait_stop(unsigned lon
  }
  
  bool __i915_spin_request(const struct drm_i915_gem_request *req,
-                        int state, unsigned long timeout_us)
+                        u32 seqno, int state, unsigned long timeout_us)
  {
-       unsigned int cpu;
+       struct intel_engine_cs *engine = req->engine;
+       unsigned int irq, cpu;
  
        /* When waiting for high frequency requests, e.g. during synchronous
         * rendering split between the CPU and GPU, the finite amount of time
         * takes to sleep on a request, on the order of a microsecond.
         */
  
+       irq = atomic_read(&engine->irq_count);
        timeout_us += local_clock_us(&cpu);
        do {
-               if (__i915_gem_request_completed(req))
+               if (seqno != i915_gem_request_global_seqno(req))
+                       break;
+               if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
+                                     seqno))
                        return true;
  
+               /* Seqnos are meant to be ordered *before* the interrupt. If
+                * we see an interrupt without a corresponding seqno advance,
+                * assume we won't see one in the near future but require
+                * the engine->seqno_barrier() to fix up coherency.
+                */
+               if (atomic_read(&engine->irq_count) != irq)
+                       break;
                if (signal_pending_state(state, current))
                        break;
  
        return false;
  }
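
Beyond checking the seqno, the reworked spinner bails out when the request loses its global seqno (it was unsubmitted) or when the engine's interrupt counter moves without the seqno advancing, since the sleeping-waiter path will then take over. A condensed userspace model of the snapshot-a-counter-then-spin pattern (standalone C sketch; the names are placeholders, not driver API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <time.h>

    static atomic_uint irq_count;       /* bumped by the "interrupt" path */

    static unsigned long now_us(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000000ul + ts.tv_nsec / 1000;
    }

    /* Spin until done() reports completion, an interrupt fires, or the
     * time budget expires; completion is rechecked on every pass.
     */
    static bool spin_request(bool (*done)(void), unsigned long timeout_us)
    {
            unsigned int irq = atomic_load(&irq_count); /* snapshot */
            unsigned long end = now_us() + timeout_us;

            do {
                    if (done())
                            return true;

                    /* An interrupt arrived yet done() is still false:
                     * stop burning CPU and let the wakeup path handle it.
                     */
                    if (atomic_load(&irq_count) != irq)
                            break;
            } while (now_us() < end);

            return false;
    }

    static bool always_done(void) { return true; }

    int main(void)
    {
            return spin_request(always_done, 100) ? 0 : 1;
    }
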
  
- static long
- __i915_request_wait_for_execute(struct drm_i915_gem_request *request,
-                               unsigned int flags,
-                               long timeout)
+ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
  {
-       const int state = flags & I915_WAIT_INTERRUPTIBLE ?
-               TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-       wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
-       DEFINE_WAIT(reset);
-       DEFINE_WAIT(wait);
-       if (flags & I915_WAIT_LOCKED)
-               add_wait_queue(q, &reset);
-       do {
-               prepare_to_wait(&request->execute.wait, &wait, state);
-               if (i915_sw_fence_done(&request->execute))
-                       break;
-               if (flags & I915_WAIT_LOCKED &&
-                   i915_reset_in_progress(&request->i915->gpu_error)) {
-                       __set_current_state(TASK_RUNNING);
-                       i915_reset(request->i915);
-                       reset_wait_queue(q, &reset);
-                       continue;
-               }
-               if (signal_pending_state(state, current)) {
-                       timeout = -ERESTARTSYS;
-                       break;
-               }
-               if (!timeout) {
-                       timeout = -ETIME;
-                       break;
-               }
-               timeout = io_schedule_timeout(timeout);
-       } while (1);
-       finish_wait(&request->execute.wait, &wait);
-       if (flags & I915_WAIT_LOCKED)
-               remove_wait_queue(q, &reset);
+       if (likely(!i915_reset_in_progress(&request->i915->gpu_error)))
+               return false;
  
-       return timeout;
+       __set_current_state(TASK_RUNNING);
+       i915_reset(request->i915);
+       return true;
  }
  
  /**
@@@ -1068,7 -1042,9 +1045,9 @@@ long i915_wait_request(struct drm_i915_
  {
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-       DEFINE_WAIT(reset);
+       wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
+       DEFINE_WAIT_FUNC(reset, default_wake_function);
+       DEFINE_WAIT_FUNC(exec, default_wake_function);
        struct intel_wait wait;
  
        might_sleep();
        if (!timeout)
                return -ETIME;
  
-       trace_i915_gem_request_wait_begin(req);
+       trace_i915_gem_request_wait_begin(req, flags);
+ 
+       add_wait_queue(&req->execute, &exec);
+       if (flags & I915_WAIT_LOCKED)
+               add_wait_queue(errq, &reset);
+ 
+       intel_wait_init(&wait, req);
+ 
+ restart:
+       do {
+               set_current_state(state);
+               if (intel_wait_update_request(&wait, req))
+                       break;
+               if (flags & I915_WAIT_LOCKED &&
+                   __i915_wait_request_check_and_reset(req))
+                       continue;
  
-       if (!i915_sw_fence_done(&req->execute)) {
-               timeout = __i915_request_wait_for_execute(req, flags, timeout);
-               if (timeout < 0)
+               if (signal_pending_state(state, current)) {
+                       timeout = -ERESTARTSYS;
                        goto complete;
+               }
  
-               GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
-       }
-       GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
-       GEM_BUG_ON(!req->global_seqno);
+               if (!timeout) {
+                       timeout = -ETIME;
+                       goto complete;
+               }
+               timeout = io_schedule_timeout(timeout);
+       } while (1);
+       GEM_BUG_ON(!intel_wait_has_seqno(&wait));
+       GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
  
        /* Optimistic short spin before touching IRQs */
        if (i915_spin_request(req, state, 5))
                goto complete;
  
        set_current_state(state);
-       if (flags & I915_WAIT_LOCKED)
-               add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
-       intel_wait_init(&wait, req->global_seqno);
        if (intel_engine_add_wait(req->engine, &wait))
                /* In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
                 */
                goto wakeup;
  
+       if (flags & I915_WAIT_LOCKED)
+               __i915_wait_request_check_and_reset(req);
        for (;;) {
                if (signal_pending_state(state, current)) {
                        timeout = -ERESTARTSYS;
  
                timeout = io_schedule_timeout(timeout);
  
-               if (intel_wait_complete(&wait))
+               if (intel_wait_complete(&wait) &&
+                   intel_wait_check_request(&wait, req))
                        break;
  
                set_current_state(state);
@@@ -1151,25 -1149,25 +1152,25 @@@ wakeup
                 * itself, or indirectly by recovering the GPU).
                 */
                if (flags & I915_WAIT_LOCKED &&
-                   i915_reset_in_progress(&req->i915->gpu_error)) {
-                       __set_current_state(TASK_RUNNING);
-                       i915_reset(req->i915);
-                       reset_wait_queue(&req->i915->gpu_error.wait_queue,
-                                        &reset);
+                   __i915_wait_request_check_and_reset(req))
                        continue;
-               }
  
                /* Only spin if we know the GPU is processing this request */
                if (i915_spin_request(req, state, 2))
                        break;
+               if (!intel_wait_check_request(&wait, req)) {
+                       intel_engine_remove_wait(req->engine, &wait);
+                       goto restart;
+               }
        }
  
        intel_engine_remove_wait(req->engine, &wait);
-       if (flags & I915_WAIT_LOCKED)
-               remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
-       __set_current_state(TASK_RUNNING);
  complete:
+       __set_current_state(TASK_RUNNING);
+       if (flags & I915_WAIT_LOCKED)
+               remove_wait_queue(errq, &reset);
+       remove_wait_queue(&req->execute, &exec);
        trace_i915_gem_request_wait_end(req);
  
        return timeout;
  static void engine_retire_requests(struct intel_engine_cs *engine)
  {
        struct drm_i915_gem_request *request, *next;
+       u32 seqno = intel_engine_get_seqno(engine);
+       LIST_HEAD(retire);
  
+       spin_lock_irq(&engine->timeline->lock);
        list_for_each_entry_safe(request, next,
                                 &engine->timeline->requests, link) {
-               if (!__i915_gem_request_completed(request))
-                       return;
+               if (!i915_seqno_passed(seqno, request->global_seqno))
+                       break;
  
-               i915_gem_request_retire(request);
+               list_move_tail(&request->link, &retire);
        }
+       spin_unlock_irq(&engine->timeline->lock);
+ 
+       list_for_each_entry_safe(request, next, &retire, link)
+               i915_gem_request_retire(request);
  }
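
Retirement is now split in two phases: completed requests are first detached onto a private list while the engine timeline spinlock is held, and the heavier i915_gem_request_retire() work runs afterwards with the lock dropped. A generic standalone sketch of the same detach-then-process pattern (hypothetical C using POSIX threads):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct request {
            unsigned int seqno;
            struct request *next;
    };

    static pthread_mutex_t timeline_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct request *pending;     /* oldest first, seqno-ordered */

    static void retire_completed(unsigned int hw_seqno)
    {
            struct request *done = NULL, **tail = &done;

            /* Phase 1: detach completed requests under the lock. */
            pthread_mutex_lock(&timeline_lock);
            while (pending && pending->seqno <= hw_seqno) {
                    *tail = pending;
                    pending = pending->next;
                    tail = &(*tail)->next;
                    *tail = NULL;
            }
            pthread_mutex_unlock(&timeline_lock);

            /* Phase 2: the expensive work runs with the lock dropped. */
            while (done) {
                    struct request *rq = done;

                    done = done->next;
                    printf("retiring %u\n", rq->seqno);
                    free(rq);
            }
    }

    int main(void)
    {
            for (unsigned int i = 3; i >= 1; i--) {
                    struct request *rq = malloc(sizeof(*rq));

                    rq->seqno = i;
                    rq->next = pending;
                    pending = rq;
            }
            retire_completed(2);        /* retires 1 and 2, leaves 3 pending */
            return 0;
    }
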
  
  void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
        for_each_engine(engine, dev_priv, id)
                engine_retire_requests(engine);
  }
+ 
+ #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ #include "selftests/mock_request.c"
+ #include "selftests/i915_gem_request.c"
+ #endif
index a62feb68689535d09a35ffda36ed9603a55b0ed4,29b002dacbd52f4357c8d4e0f8da056282646449..df95733cf112a5d8769ec409d1af13094077035a
@@@ -180,7 -180,7 +180,7 @@@ i915_hotplug_interrupt_update_locked(st
  {
        uint32_t val;
  
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);
  
        val = I915_READ(PORT_HOTPLUG_EN);
@@@ -222,7 -222,7 +222,7 @@@ void ilk_update_display_irq(struct drm_
  {
        uint32_t new_val;
  
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        WARN_ON(enabled_irq_mask & ~interrupt_mask);
  
@@@ -250,7 -250,7 +250,7 @@@ static void ilk_update_gt_irq(struct dr
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
  {
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        WARN_ON(enabled_irq_mask & ~interrupt_mask);
  
@@@ -302,7 -302,7 +302,7 @@@ static void snb_update_pm_irq(struct dr
  
        WARN_ON(enabled_irq_mask & ~interrupt_mask);
  
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        new_val = dev_priv->pm_imr;
        new_val &= ~interrupt_mask;
@@@ -340,7 -340,7 +340,7 @@@ void gen6_reset_pm_iir(struct drm_i915_
  {
        i915_reg_t reg = gen6_pm_iir(dev_priv);
  
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        I915_WRITE(reg, reset_mask);
        I915_WRITE(reg, reset_mask);
  
  void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
  {
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        dev_priv->pm_ier |= enable_mask;
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
  
  void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
  {
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        dev_priv->pm_ier &= ~disable_mask;
        __gen6_mask_pm_irq(dev_priv, disable_mask);
@@@ -463,7 -463,7 +463,7 @@@ static void bdw_update_port_irq(struct 
        uint32_t new_val;
        uint32_t old_val;
  
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        WARN_ON(enabled_irq_mask & ~interrupt_mask);
  
@@@ -496,7 -496,7 +496,7 @@@ void bdw_update_pipe_irq(struct drm_i91
  {
        uint32_t new_val;
  
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        WARN_ON(enabled_irq_mask & ~interrupt_mask);
  
@@@ -530,7 -530,7 +530,7 @@@ void ibx_display_interrupt_update(struc
  
        WARN_ON(enabled_irq_mask & ~interrupt_mask);
  
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;
@@@ -546,7 -546,7 +546,7 @@@ __i915_enable_pipestat(struct drm_i915_
        i915_reg_t reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
  
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));
  
        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
@@@ -573,7 -573,7 +573,7 @@@ __i915_disable_pipestat(struct drm_i915
        i915_reg_t reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
  
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));
  
        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
@@@ -783,6 -783,9 +783,9 @@@ static int __intel_get_crtc_scanline(st
        enum pipe pipe = crtc->pipe;
        int position, vtotal;
  
+       if (!crtc->active)
+               return -1;
+ 
        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;
@@@ -1033,9 -1036,42 +1036,42 @@@ static void ironlake_rps_change_irq_han
  
  static void notify_ring(struct intel_engine_cs *engine)
  {
-       smp_store_mb(engine->breadcrumbs.irq_posted, true);
-       if (intel_engine_wakeup(engine))
-               trace_i915_gem_request_notify(engine);
+       struct drm_i915_gem_request *rq = NULL;
+       struct intel_wait *wait;
+ 
+       atomic_inc(&engine->irq_count);
+       set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ 
+       spin_lock(&engine->breadcrumbs.irq_lock);
+       wait = engine->breadcrumbs.irq_wait;
+       if (wait) {
+               /* We use a callback from the dma-fence to submit
+                * requests after waiting on our own requests. To
+                * ensure minimum delay in queuing the next request to
+                * hardware, signal the fence now rather than wait for
+                * the signaler to be woken up. We still wake up the
+                * waiter in order to handle the irq-seqno coherency
+                * issues (we may receive the interrupt before the
+                * seqno is written, see __i915_request_irq_complete())
+                * and to handle coalescing of multiple seqno updates
+                * and many waiters.
+                */
+               if (i915_seqno_passed(intel_engine_get_seqno(engine),
+                                     wait->seqno))
+                       rq = i915_gem_request_get(wait->request);
+               wake_up_process(wait->tsk);
+       } else {
+               __intel_engine_disarm_breadcrumbs(engine);
+       }
+       spin_unlock(&engine->breadcrumbs.irq_lock);
+ 
+       if (rq) {
+               dma_fence_signal(&rq->fence);
+               i915_gem_request_put(rq);
+       }
+ 
+       trace_intel_engine_notify(engine, wait);
  }
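
notify_ring() now signals the dma-fence straight from the interrupt handler instead of always deferring to the signaler thread, trimming one wakeup from the latency-critical path. Note the shape: the reference is taken while breadcrumbs.irq_lock is held, but the signal itself runs after the lock is dropped. A generic standalone sketch of that take-ref-inside/signal-outside shape (C11 atomics; names are illustrative, not driver API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct fence {
            atomic_int refcount;
            atomic_bool signaled;
    };

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct fence *current_wait;  /* protected by irq_lock */

    static void fence_put(struct fence *f)
    {
            if (atomic_fetch_sub(&f->refcount, 1) == 1)
                    printf("last reference dropped\n");
    }

    static void irq_handler(void)
    {
            struct fence *f = NULL;

            pthread_mutex_lock(&irq_lock);
            if (current_wait) {
                    /* Pin the fence so it cannot vanish after unlock. */
                    atomic_fetch_add(&current_wait->refcount, 1);
                    f = current_wait;
            }
            pthread_mutex_unlock(&irq_lock);

            if (f) {
                    /* Signalling may run callbacks: do it after dropping
                     * irq_lock, never while holding it.
                     */
                    atomic_store(&f->signaled, true);
                    fence_put(f);
            }
    }

    int main(void)
    {
            struct fence f = { .refcount = 1, .signaled = false };

            current_wait = &f;
            irq_handler();
            return atomic_load(&f.signaled) ? 0 : 1;
    }
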
  
  static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@@ -1173,20 -1209,12 +1209,12 @@@ static void gen6_pm_rps_work(struct wor
  
                if (new_delay >= dev_priv->rps.max_freq_softlimit)
                        adj = 0;
-               /*
-                * For better performance, jump directly
-                * to RPe if we're below it.
-                */
-               if (new_delay < dev_priv->rps.efficient_freq - adj) {
-                       new_delay = dev_priv->rps.efficient_freq;
-                       adj = 0;
-               }
        } else if (client_boost || any_waiters(dev_priv)) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
-               else
+               else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
                        new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
        new_delay += adj;
        new_delay = clamp_t(int, new_delay, min, max);
  
-       intel_set_rps(dev_priv, new_delay);
+       if (intel_set_rps(dev_priv, new_delay)) {
+               DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
+               dev_priv->rps.last_adj = 0;
+       }
  
        mutex_unlock(&dev_priv->rps.hw_lock);
  }
@@@ -1349,8 -1380,11 +1380,11 @@@ gen8_cs_irq_handler(struct intel_engine
  {
        if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
                notify_ring(engine);
-       if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
-               tasklet_schedule(&engine->irq_tasklet);
+       if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
+               set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+               tasklet_hi_schedule(&engine->irq_tasklet);
+       }
  }
  
  static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
@@@ -1926,10 -1960,6 +1960,10 @@@ static irqreturn_t valleyview_irq_handl
                 * signalled in iir */
                valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
  
 +              if (iir & (I915_LPE_PIPE_A_INTERRUPT |
 +                         I915_LPE_PIPE_B_INTERRUPT))
 +                      intel_lpe_audio_irq_handler(dev_priv);
 +
                /*
                 * VLV_IIR is single buffered, and reflects the level
                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
@@@ -2010,11 -2040,6 +2044,11 @@@ static irqreturn_t cherryview_irq_handl
                 * signalled in iir */
                valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
  
 +              if (iir & (I915_LPE_PIPE_A_INTERRUPT |
 +                         I915_LPE_PIPE_B_INTERRUPT |
 +                         I915_LPE_PIPE_C_INTERRUPT))
 +                      intel_lpe_audio_irq_handler(dev_priv);
 +
                /*
                 * VLV_IIR is single buffered, and reflects the level
                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
@@@ -2957,7 -2982,6 +2991,7 @@@ static void vlv_display_irq_postinstall
        u32 pipestat_mask;
        u32 enable_mask;
        enum pipe pipe;
 +      u32 val;
  
        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
                        PIPE_CRC_DONE_INTERRUPT_STATUS;
  
        WARN_ON(dev_priv->irq_mask != ~0);
  
 +      val = (I915_LPE_PIPE_A_INTERRUPT |
 +              I915_LPE_PIPE_B_INTERRUPT |
 +              I915_LPE_PIPE_C_INTERRUPT);
 +
 +      enable_mask |= val;
 +
        dev_priv->irq_mask = ~enable_mask;
  
        GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
@@@ -3106,19 -3124,9 +3140,9 @@@ static u32 intel_hpd_enabled_irqs(struc
        return enabled_irqs;
  }
  
- static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
  {
-       u32 hotplug_irqs, hotplug, enabled_irqs;
-       if (HAS_PCH_IBX(dev_priv)) {
-               hotplug_irqs = SDE_HOTPLUG_MASK;
-               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
-       } else {
-               hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
-               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
-       }
-       ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+       u32 hotplug;
  
        /*
         * Enable digital hotplug on the PCH, and configure the DP short pulse
         * The pulse duration bits are reserved on LPT+.
         */
        hotplug = I915_READ(PCH_PORT_HOTPLUG);
-       hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
-       hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
-       hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+       hotplug &= ~(PORTB_PULSE_DURATION_MASK |
+                    PORTC_PULSE_DURATION_MASK |
+                    PORTD_PULSE_DURATION_MASK);
        hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+       hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+       hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
        /*
         * When CPU and PCH are on the same package, port A
         * HPD must be enabled in both north and south.
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  }
  
+ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+ {
+       u32 hotplug_irqs, enabled_irqs;
+ 
+       if (HAS_PCH_IBX(dev_priv)) {
+               hotplug_irqs = SDE_HOTPLUG_MASK;
+               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
+       } else {
+               hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
+               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
+       }
+ 
+       ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ 
+       ibx_hpd_detection_setup(dev_priv);
+ }
+ 
  static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
  {
        u32 hotplug;
@@@ -3168,9 -3195,25 +3211,25 @@@ static void spt_hpd_irq_setup(struct dr
        spt_hpd_detection_setup(dev_priv);
  }
  
+ static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
+ {
+       u32 hotplug;
+ 
+       /*
+        * Enable digital hotplug on the CPU, and configure the DP short pulse
+        * duration to 2ms (which is the minimum in the Display Port spec)
+        * The pulse duration bits are reserved on HSW+.
+        */
+       hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+       hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
+       hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
+                  DIGITAL_PORTA_PULSE_DURATION_2ms;
+       I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+ }
+ 
  static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
  {
-       u32 hotplug_irqs, hotplug, enabled_irqs;
+       u32 hotplug_irqs, enabled_irqs;
  
        if (INTEL_GEN(dev_priv) >= 8) {
                hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
                ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
        }
  
-       /*
-        * Enable digital hotplug on the CPU, and configure the DP short pulse
-        * duration to 2ms (which is the minimum in the Display Port spec)
-        * The pulse duration bits are reserved on HSW+.
-        */
-       hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
-       hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
-       hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
-       I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+       ilk_hpd_detection_setup(dev_priv);
  
        ibx_hpd_irq_setup(dev_priv);
  }
@@@ -3268,7 -3303,7 +3319,7 @@@ static void ibx_irq_postinstall(struct 
  
        if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
            HAS_PCH_LPT(dev_priv))
-               ; /* TODO: Enable HPD detection on older PCH platforms too */
+               ibx_hpd_detection_setup(dev_priv);
        else
                spt_hpd_detection_setup(dev_priv);
  }
@@@ -3345,6 -3380,8 +3396,8 @@@ static int ironlake_irq_postinstall(str
  
        gen5_gt_irq_postinstall(dev);
  
+       ilk_hpd_detection_setup(dev_priv);
        ibx_irq_postinstall(dev);
  
        if (IS_IRONLAKE_M(dev_priv)) {
  
  void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
  {
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        if (dev_priv->display_irqs_enabled)
                return;
  
  void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
  {
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        if (!dev_priv->display_irqs_enabled)
                return;
@@@ -3485,6 -3522,8 +3538,8 @@@ static void gen8_de_irq_postinstall(str
  
        if (IS_GEN9_LP(dev_priv))
                bxt_hpd_detection_setup(dev_priv);
+       else if (IS_BROADWELL(dev_priv))
+               ilk_hpd_detection_setup(dev_priv);
  }
  
  static int gen8_irq_postinstall(struct drm_device *dev)
@@@ -4052,7 -4091,7 +4107,7 @@@ static void i915_hpd_irq_setup(struct d
  {
        u32 hotplug_en;
  
-       assert_spin_locked(&dev_priv->irq_lock);
+       lockdep_assert_held(&dev_priv->irq_lock);
  
        /* Note HDMI and DP share hotplug bits */
        /* enable bits are the same for all generations */
@@@ -4249,6 -4288,7 +4304,6 @@@ void intel_irq_init(struct drm_i915_pri
        if (IS_GEN2(dev_priv)) {
                /* Gen2 doesn't have a hardware frame counter */
                dev->max_vblank_count = 0;
 -              dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
        } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = g4x_get_vblank_counter;
        if (!IS_GEN2(dev_priv))
                dev->vblank_disable_immediate = true;
  
+       /* Most platforms treat the display irq block as an always-on
+        * power domain. vlv/chv can disable it at runtime and need
+        * special care to avoid writing any of the display block registers
+        * outside of the power domain. We defer setting up the display irqs
+        * in this case to the runtime pm.
+        */
+       dev_priv->display_irqs_enabled = true;
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               dev_priv->display_irqs_enabled = false;
+ 
+       dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ 
        dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
  
index 1c8f5b9a7fcd0ca18be30a4ab30bcfe3294b872d,243050ea4938803701206c8b8b4b6b033abf5a65..cc843f96576f0abf96c7ea1fff4d0c92cbcde976
@@@ -48,6 -48,8 +48,8 @@@ static inline bool i915_mmio_reg_valid(
        return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
  }
  
+ #define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
  #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
  #define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
  #define _PLANE(plane, a, b) _PIPE(plane, a, b)
  #define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
  #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
  #define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
- #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
-                              (pipe) == PIPE_B ? (b) : (c))
+ #define _PIPE3(pipe, ...) _PICK(pipe, __VA_ARGS__)
  #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PIPE3(pipe, a, b, c))
- #define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
-                              (port) == PORT_B ? (b) : (c))
+ #define _PORT3(port, ...) _PICK(port, __VA_ARGS__)
  #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c))
- #define _PHY3(phy, a, b, c) ((phy) == DPIO_PHY0 ? (a) : \
-                            (phy) == DPIO_PHY1 ? (b) : (c))
+ #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
  #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
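
_PICK() collapses the old two-way ternary chains into an index into an anonymous C99 compound-literal array, so _PIPE3()/_PORT3()/_PHY3() extend naturally to any number of instances. The construct demonstrated standalone (plain C, outside the driver; the offsets are arbitrary example values):

    #include <stdint.h>
    #include <stdio.h>

    /* Same construct as _PICK(): index into an anonymous const array. */
    #define PICK(idx, ...) (((const uint32_t []){ __VA_ARGS__ })[idx])

    int main(void)
    {
            /* Select a per-pipe offset by index instead of chained ternaries. */
            printf("0x%x\n", (unsigned)PICK(0, 0x70180, 0x71180, 0x72180)); /* pipe A */
            printf("0x%x\n", (unsigned)PICK(2, 0x70180, 0x71180, 0x72180)); /* pipe C */
            return 0;
    }

As with the ternary versions, an out-of-range index is not caught at compile time, so callers must still pass a valid pipe/port/phy.
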
  
  #define _MASKED_FIELD(mask, value) ({                                    \
  #define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
  #define _MASKED_BIT_DISABLE(a)        (_MASKED_FIELD((a), 0))
  
+ /* Engine ID */
  
+ #define RCS_HW                0
+ #define VCS_HW                1
+ #define BCS_HW                2
+ #define VECS_HW               3
+ #define VCS2_HW               4
  
  /* PCI config space */
  
  #define GCFGC 0xf0 /* 915+ only */
  #define   GC_LOW_FREQUENCY_ENABLE     (1 << 7)
  #define   GC_DISPLAY_CLOCK_190_200_MHZ        (0 << 4)
- #define   GC_DISPLAY_CLOCK_333_MHZ    (4 << 4)
+ #define   GC_DISPLAY_CLOCK_333_320_MHZ        (4 << 4)
  #define   GC_DISPLAY_CLOCK_267_MHZ_PNV        (0 << 4)
  #define   GC_DISPLAY_CLOCK_333_MHZ_PNV        (1 << 4)
  #define   GC_DISPLAY_CLOCK_444_MHZ_PNV        (2 << 4)
@@@ -1553,6 -1558,7 +1558,7 @@@ enum skl_disp_power_wells 
        _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
  
  #define BXT_P_CR_GT_DISP_PWRON                _MMIO(0x138090)
+ #define  MIPIO_RST_CTRL                               (1 << 2)
  
  #define _BXT_PHY_CTL_DDI_A            0x64C00
  #define _BXT_PHY_CTL_DDI_B            0x64C10
  #define I915_ASLE_INTERRUPT                           (1<<0)
  #define I915_BSD_USER_INTERRUPT                               (1<<25)
  
 +#define I915_HDMI_LPE_AUDIO_BASE      (VLV_DISPLAY_BASE + 0x65000)
 +#define I915_HDMI_LPE_AUDIO_SIZE      0x1000
 +
 +/* DisplayPort Audio w/ LPE */
 +#define VLV_AUD_CHICKEN_BIT_REG               _MMIO(VLV_DISPLAY_BASE + 0x62F38)
 +#define VLV_CHICKEN_BIT_DBG_ENABLE    (1 << 0)
 +
 +#define _VLV_AUD_PORT_EN_B_DBG                (VLV_DISPLAY_BASE + 0x62F20)
 +#define _VLV_AUD_PORT_EN_C_DBG                (VLV_DISPLAY_BASE + 0x62F30)
 +#define _VLV_AUD_PORT_EN_D_DBG                (VLV_DISPLAY_BASE + 0x62F34)
 +#define VLV_AUD_PORT_EN_DBG(port)     _MMIO_PORT3((port) - PORT_B,       \
 +                                                  _VLV_AUD_PORT_EN_B_DBG, \
 +                                                  _VLV_AUD_PORT_EN_C_DBG, \
 +                                                  _VLV_AUD_PORT_EN_D_DBG)
 +#define VLV_AMP_MUTE                  (1 << 1)
 +
  #define GEN6_BSD_RNCID                        _MMIO(0x12198)
  
  #define GEN7_FF_THREAD_MODE           _MMIO(0x20a0)
@@@ -3376,10 -3366,22 +3382,22 @@@ enum 
        INTEL_LEGACY_64B_CONTEXT
  };
  
+ enum {
+       FAULT_AND_HANG = 0,
+       FAULT_AND_HALT, /* Debug only */
+       FAULT_AND_STREAM,
+       FAULT_AND_CONTINUE /* Unsupported */
+ };
+ #define GEN8_CTX_VALID (1<<0)
+ #define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
+ #define GEN8_CTX_FORCE_RESTORE (1<<2)
+ #define GEN8_CTX_L3LLC_COHERENT (1<<5)
+ #define GEN8_CTX_PRIVILEGE (1<<8)
  #define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
- #define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
-                               INTEL_LEGACY_64B_CONTEXT : \
-                               INTEL_LEGACY_32B_CONTEXT)
+ #define GEN8_CTX_ID_SHIFT 32
+ #define GEN8_CTX_ID_WIDTH 21
  
  #define CHV_CLK_CTL1                  _MMIO(0x101100)
  #define VLV_CLK_CTL2                  _MMIO(0x101104)
  #define _PLANE_KEYMSK_2_A                     0x70298
  #define _PLANE_KEYMAX_1_A                     0x701a0
  #define _PLANE_KEYMAX_2_A                     0x702a0
+ #define _PLANE_COLOR_CTL_1_A                  0x701CC /* GLK+ */
+ #define _PLANE_COLOR_CTL_2_A                  0x702CC /* GLK+ */
+ #define _PLANE_COLOR_CTL_3_A                  0x703CC /* GLK+ */
+ #define   PLANE_COLOR_PIPE_GAMMA_ENABLE               (1 << 30)
+ #define   PLANE_COLOR_PIPE_CSC_ENABLE         (1 << 23)
+ #define   PLANE_COLOR_PLANE_GAMMA_DISABLE     (1 << 13)
  #define _PLANE_BUF_CFG_1_A                    0x7027c
  #define _PLANE_BUF_CFG_2_A                    0x7037c
  #define _PLANE_NV12_BUF_CFG_1_A               0x70278
  #define _PLANE_NV12_BUF_CFG_2_A               0x70378
  
  #define _PLANE_CTL_1_B                                0x71180
  #define _PLANE_CTL_2_B                                0x71280
  #define _PLANE_CTL_3_B                                0x71380
  #define PLANE_NV12_BUF_CFG(pipe, plane)       \
        _MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
  
- /* SKL new cursor registers */
+ #define _PLANE_COLOR_CTL_1_B                  0x711CC
+ #define _PLANE_COLOR_CTL_2_B                  0x712CC
+ #define _PLANE_COLOR_CTL_3_B                  0x713CC
+ #define _PLANE_COLOR_CTL_1(pipe)      \
+       _PIPE(pipe, _PLANE_COLOR_CTL_1_A, _PLANE_COLOR_CTL_1_B)
+ #define _PLANE_COLOR_CTL_2(pipe)      \
+       _PIPE(pipe, _PLANE_COLOR_CTL_2_A, _PLANE_COLOR_CTL_2_B)
+ #define PLANE_COLOR_CTL(pipe, plane)  \
+       _MMIO_PLANE(plane, _PLANE_COLOR_CTL_1(pipe), _PLANE_COLOR_CTL_2(pipe))
+ /* SKL new cursor registers */
  #define _CUR_BUF_CFG_A                                0x7017c
  #define _CUR_BUF_CFG_B                                0x7117c
  #define CUR_BUF_CFG(pipe)     _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
  #define CHICKEN_PAR2_1                _MMIO(0x42090)
  #define  KVM_CONFIG_CHANGE_NOTIFICATION_SELECT        (1 << 14)
  
+ #define CHICKEN_MISC_2                _MMIO(0x42084)
+ #define  GLK_CL0_PWR_DOWN     (1 << 10)
+ #define  GLK_CL1_PWR_DOWN     (1 << 11)
+ #define  GLK_CL2_PWR_DOWN     (1 << 12)
  #define _CHICKEN_PIPESL_1_A   0x420b0
  #define _CHICKEN_PIPESL_1_B   0x420b4
  #define  HSW_FBCQ_DIS                 (1 << 22)
  #define   PAL_PREC_10_12_BIT          (0 << 31)
  #define   PAL_PREC_SPLIT_MODE         (1 << 31)
  #define   PAL_PREC_AUTO_INCREMENT     (1 << 15)
+ #define   PAL_PREC_INDEX_VALUE_MASK   (0x3ff << 0)
  #define _PAL_PREC_DATA_A      0x4A404
  #define _PAL_PREC_DATA_B      0x4AC04
  #define _PAL_PREC_DATA_C      0x4B404
  #define _PAL_PREC_EXT_GC_MAX_A        0x4A420
  #define _PAL_PREC_EXT_GC_MAX_B        0x4AC20
  #define _PAL_PREC_EXT_GC_MAX_C        0x4B420
+ #define _PAL_PREC_EXT2_GC_MAX_A       0x4A430
+ #define _PAL_PREC_EXT2_GC_MAX_B       0x4AC30
+ #define _PAL_PREC_EXT2_GC_MAX_C       0x4B430
  
  #define PREC_PAL_INDEX(pipe)          _MMIO_PIPE(pipe, _PAL_PREC_INDEX_A, _PAL_PREC_INDEX_B)
  #define PREC_PAL_DATA(pipe)           _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
  #define PREC_PAL_GC_MAX(pipe, i)      _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
  #define PREC_PAL_EXT_GC_MAX(pipe, i)  _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
  
+ #define _PRE_CSC_GAMC_INDEX_A 0x4A484
+ #define _PRE_CSC_GAMC_INDEX_B 0x4AC84
+ #define _PRE_CSC_GAMC_INDEX_C 0x4B484
+ #define   PRE_CSC_GAMC_AUTO_INCREMENT (1 << 10)
+ #define _PRE_CSC_GAMC_DATA_A  0x4A488
+ #define _PRE_CSC_GAMC_DATA_B  0x4AC88
+ #define _PRE_CSC_GAMC_DATA_C  0x4B488
+ #define PRE_CSC_GAMC_INDEX(pipe)      _MMIO_PIPE(pipe, _PRE_CSC_GAMC_INDEX_A, _PRE_CSC_GAMC_INDEX_B)
+ #define PRE_CSC_GAMC_DATA(pipe)               _MMIO_PIPE(pipe, _PRE_CSC_GAMC_DATA_A, _PRE_CSC_GAMC_DATA_B)
  /* pipe CSC & degamma/gamma LUTs on CHV */
  #define _CGM_PIPE_A_CSC_COEFF01       (VLV_DISPLAY_BASE + 0x67900)
  #define _CGM_PIPE_A_CSC_COEFF23       (VLV_DISPLAY_BASE + 0x67904)
  
  /* MIPI DSI registers */
  
- #define _MIPI_PORT(port, a, c)        _PORT3(port, a, 0, c)   /* ports A and C only */
+ #define _MIPI_PORT(port, a, c)        ((port) ? c : a)        /* ports A and C only */
  #define _MMIO_MIPI(port, a, c)        _MMIO(_MIPI_PORT(port, a, c))
  
+ #define MIPIO_TXESC_CLK_DIV1                  _MMIO(0x160004)
+ #define  GLK_TX_ESC_CLK_DIV1_MASK                     0x3FF
+ #define MIPIO_TXESC_CLK_DIV2                  _MMIO(0x160008)
+ #define  GLK_TX_ESC_CLK_DIV2_MASK                     0x3FF
  /* BXT MIPI clock controls */
  #define BXT_MAX_VAR_OUTPUT_KHZ                        39500
  
  #define  BXT_DSI_PLL_PVD_RATIO_SHIFT  16
  #define  BXT_DSI_PLL_PVD_RATIO_MASK   (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
  #define  BXT_DSI_PLL_PVD_RATIO_1      (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
+ #define  BXT_DSIC_16X_BY1             (0 << 10)
  #define  BXT_DSIC_16X_BY2             (1 << 10)
  #define  BXT_DSIC_16X_BY3             (2 << 10)
  #define  BXT_DSIC_16X_BY4             (3 << 10)
  #define  BXT_DSIC_16X_MASK            (3 << 10)
+ #define  BXT_DSIA_16X_BY1             (0 << 8)
  #define  BXT_DSIA_16X_BY2             (1 << 8)
  #define  BXT_DSIA_16X_BY3             (2 << 8)
  #define  BXT_DSIA_16X_BY4             (3 << 8)
  
  #define BXT_DSI_PLL_RATIO_MAX         0x7D
  #define BXT_DSI_PLL_RATIO_MIN         0x22
+ #define GLK_DSI_PLL_RATIO_MAX         0x6F
+ #define GLK_DSI_PLL_RATIO_MIN         0x22
  #define BXT_DSI_PLL_RATIO_MASK                0xFF
  #define BXT_REF_CLOCK_KHZ             19200
  
  #define _BXT_MIPIC_PORT_CTRL                          0x6B8C0
  #define BXT_MIPI_PORT_CTRL(tc)        _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
  
+ #define BXT_P_DSI_REGULATOR_CFG                       _MMIO(0x160020)
+ #define  STAP_SELECT                                  (1 << 0)
+ #define BXT_P_DSI_REGULATOR_TX_CTRL           _MMIO(0x160054)
+ #define  HS_IO_CTRL_SELECT                            (1 << 0)
  #define  DPI_ENABLE                                   (1 << 31) /* A + C */
  #define  MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT            27
  #define  MIPIA_MIPI4DPHY_DELAY_COUNT_MASK             (0xf << 27)
  #define  LP_BYTECLK_SHIFT                             0
  #define  LP_BYTECLK_MASK                              (0xffff << 0)
  
+ #define _MIPIA_TLPX_TIME_COUNT                (dev_priv->mipi_mmio_base + 0xb0a4)
+ #define _MIPIC_TLPX_TIME_COUNT                (dev_priv->mipi_mmio_base + 0xb8a4)
+ #define MIPI_TLPX_TIME_COUNT(port)     _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
+ #define _MIPIA_CLK_LANE_TIMING                (dev_priv->mipi_mmio_base + 0xb098)
+ #define _MIPIC_CLK_LANE_TIMING                (dev_priv->mipi_mmio_base + 0xb898)
+ #define MIPI_CLK_LANE_TIMING(port)     _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
  /* bits 31:0 */
  #define _MIPIA_LP_GEN_DATA            (dev_priv->mipi_mmio_base + 0xb064)
  #define _MIPIC_LP_GEN_DATA            (dev_priv->mipi_mmio_base + 0xb864)
index d76f3033e890b6da60ae6b017dab311e9a29c8da,1ab401faed34a9b262d58bbbbafe41cc6064317f..52c207e81f413a4465bec60098fedf09efea59cb
@@@ -24,7 -24,6 +24,7 @@@
  #include <linux/kernel.h>
  #include <linux/component.h>
  #include <drm/i915_component.h>
 +#include <drm/intel_lpe_audio.h>
  #include "intel_drv.h"
  
  #include <drm/drmP.h>
@@@ -624,28 -623,13 +624,28 @@@ void intel_audio_codec_enable(struct in
        dev_priv->av_enc_map[pipe] = intel_encoder;
        mutex_unlock(&dev_priv->av_mutex);
  
 -      /* audio drivers expect pipe = -1 to indicate Non-MST cases */
 -      if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
 -              pipe = -1;
 -
 -      if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
 +      if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
 +              /* audio drivers expect pipe = -1 to indicate Non-MST cases */
 +              if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
 +                      pipe = -1;
                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 (int) port, (int) pipe);
 +      }
 +
 +      switch (intel_encoder->type) {
 +      case INTEL_OUTPUT_HDMI:
 +              intel_lpe_audio_notify(dev_priv, connector->eld, port, pipe,
 +                                     crtc_state->port_clock,
 +                                     false, 0);
 +              break;
 +      case INTEL_OUTPUT_DP:
 +              intel_lpe_audio_notify(dev_priv, connector->eld, port, pipe,
 +                                     adjusted_mode->crtc_clock,
 +                                     true, crtc_state->port_clock);
 +              break;
 +      default:
 +              break;
 +      }
  }
  
  /**
@@@ -672,15 -656,13 +672,15 @@@ void intel_audio_codec_disable(struct i
        dev_priv->av_enc_map[pipe] = NULL;
        mutex_unlock(&dev_priv->av_mutex);
  
 -      /* audio drivers expect pipe = -1 to indicate Non-MST cases */
 -      if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
 -              pipe = -1;
 -
 -      if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
 +      if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
 +              /* audio drivers expect pipe = -1 to indicate Non-MST cases */
 +              if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
 +                      pipe = -1;
                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 (int) port, (int) pipe);
 +      }
 +
 +      intel_lpe_audio_notify(dev_priv, NULL, port, pipe, 0, false, 0);
  }
  
  /**
@@@ -720,7 -702,7 +720,7 @@@ static void i915_audio_component_codec_
        struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        u32 tmp;
  
-       if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+       if (!IS_GEN9_BC(dev_priv))
                return;
  
        i915_audio_component_get_power(kdev);
@@@ -752,7 -734,7 +752,7 @@@ static int i915_audio_component_get_cdc
        if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
                return -ENODEV;
  
-       return dev_priv->cdclk_freq;
+       return dev_priv->cdclk.hw.cdclk;
  }
  
  /*
@@@ -974,28 -956,3 +974,28 @@@ void i915_audio_component_cleanup(struc
        component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
        dev_priv->audio_component_registered = false;
  }
 +
 +/**
 + * intel_audio_init() - Initialize the audio driver using either the
 + * component framework or the LPE audio bridge
 + * @dev_priv: the i915 drm device private data
 + *
 + */
 +void intel_audio_init(struct drm_i915_private *dev_priv)
 +{
 +      if (intel_lpe_audio_init(dev_priv) < 0)
 +              i915_audio_component_init(dev_priv);
 +}
 +
 +/**
 + * intel_audio_deinit() - deinitialize the audio driver
 + * @dev_priv: the i915 drm device private data
 + *
 + */
 +void intel_audio_deinit(struct drm_i915_private *dev_priv)
 +{
 +      if ((dev_priv)->lpe_audio.platdev != NULL)
 +              intel_lpe_audio_teardown(dev_priv);
 +      else
 +              i915_audio_component_cleanup(dev_priv);
 +}
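
intel_audio_init()/intel_audio_deinit() implement a try-then-fallback pairing: the LPE audio bridge is attempted first and the component framework is used otherwise, with teardown keyed off which backend actually came up (here, lpe_audio.platdev). A tiny standalone model of that pattern (hypothetical C; the function names are placeholders, not driver API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool lpe_active;

    static int lpe_init(void)           { return -1; /* pretend LPE is absent */ }
    static void lpe_teardown(void)      { puts("lpe teardown"); }
    static void component_init(void)    { puts("component init"); }
    static void component_cleanup(void) { puts("component cleanup"); }

    static void audio_init(void)
    {
            if (lpe_init() < 0)
                    component_init();   /* fall back */
            else
                    lpe_active = true;
    }

    static void audio_deinit(void)
    {
            /* Tear down whichever backend init actually selected. */
            if (lpe_active)
                    lpe_teardown();
            else
                    component_cleanup();
    }

    int main(void)
    {
            audio_init();
            audio_deinit();
            return 0;
    }
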
index 7044e9a6abf7a51b099c2f9686b5a6bb73d51560,6032d2a937d58dbe53896d71a9e29f4b2522fcff..2393bb9fe665633ce16f7158d24e6343516896e8
   */
  
  #include <linux/kthread.h>
 +#include <uapi/linux/sched/types.h>
  
  #include "i915_drv.h"
  
+ static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
+ {
+       struct intel_wait *wait;
+       unsigned int result = 0;
+ 
+       lockdep_assert_held(&b->irq_lock);
+ 
+       wait = b->irq_wait;
+       if (wait) {
+               result = ENGINE_WAKEUP_WAITER;
+               if (wake_up_process(wait->tsk))
+                       result |= ENGINE_WAKEUP_ASLEEP;
+       }
+ 
+       return result;
+ }
+ 
+ unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
+ {
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       unsigned long flags;
+       unsigned int result;
+ 
+       spin_lock_irqsave(&b->irq_lock, flags);
+       result = __intel_breadcrumbs_wakeup(b);
+       spin_unlock_irqrestore(&b->irq_lock, flags);
+ 
+       return result;
+ }
+ 
+ static unsigned long wait_timeout(void)
+ {
+       return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
+ }
+ 
+ static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
+ {
+       DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s\n",
+                        engine->name, __builtin_return_address(0),
+                        yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
+                                       &engine->irq_posted)));
+ 
+       set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+ }
+ 
  static void intel_breadcrumbs_hangcheck(unsigned long data)
  {
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
  
-       if (!b->irq_enabled)
+       if (!b->irq_armed)
                return;
  
-       if (time_before(jiffies, b->timeout)) {
-               mod_timer(&b->hangcheck, b->timeout);
+       if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
+               b->hangcheck_interrupts = atomic_read(&engine->irq_count);
+               mod_timer(&b->hangcheck, wait_timeout());
                return;
        }
  
-       DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
-       set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
-       mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
-       /* Ensure that even if the GPU hangs, we get woken up.
+       /* We keep the hangcheck timer alive until we disarm the irq, even
+        * if there are no waiters at present.
         *
-        * However, note that if no one is waiting, we never notice
-        * a gpu hang. Eventually, we will have to wait for a resource
-        * held by the GPU and so trigger a hangcheck. In the most
-        * pathological case, this will be upon memory starvation! To
-        * prevent this, we also queue the hangcheck from the retire
-        * worker.
+        * If the waiter was currently running, assume it hasn't had a chance
+        * to process the pending interrupt (e.g., a low priority task on a loaded
+        * system) and wait until it sleeps before declaring a missed interrupt.
+        *
+        * If the waiter was asleep (and not even pending a wakeup), then we
+        * must have missed an interrupt as the GPU has stopped advancing
+        * but we still have a waiter. Assuming all batches complete within
+        * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
         */
-       i915_queue_hangcheck(engine->i915);
- }
- static unsigned long wait_timeout(void)
- {
-       return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
+       if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
+               missed_breadcrumb(engine);
+               mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+       } else {
+               mod_timer(&b->hangcheck, wait_timeout());
+       }
  }
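
The breadcrumbs hangcheck no longer relies on an absolute deadline: each tick it compares the engine's interrupt count against the snapshot taken on the previous tick, re-arming on progress and reporting a missed breadcrumb only when the count has stalled while a waiter is actually asleep. A condensed userspace model of that sample-and-compare heuristic (standalone C sketch, illustrative names):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint irq_count;       /* bumped by the "interrupt" path */
    static unsigned int hangcheck_irqs; /* snapshot from the previous tick */

    /* Returns true if a missed interrupt should be reported. */
    static bool hangcheck_tick(bool waiter_asleep)
    {
            unsigned int now = atomic_load(&irq_count);

            if (now != hangcheck_irqs) {
                    hangcheck_irqs = now;   /* progress: re-arm, try later */
                    return false;
            }

            /* No interrupts since the last tick: only a waiter that is
             * actually asleep (not merely busy) proves a lost wakeup.
             */
            return waiter_asleep;
    }

    int main(void)
    {
            atomic_fetch_add(&irq_count, 1);
            printf("%d\n", hangcheck_tick(true));   /* 0: count advanced */
            printf("%d\n", hangcheck_tick(true));   /* 1: stalled + asleep */
            return 0;
    }
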
  
  static void intel_breadcrumbs_fake_irq(unsigned long data)
  {
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       unsigned long flags;
  
        /*
         * The timer persists in case we cannot enable interrupts,
         * every jiffie in order to kick the oldest waiter to do the
         * coherent seqno check.
         */
-       if (intel_engine_wakeup(engine))
-               mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+       spin_lock_irqsave(&b->irq_lock, flags);
+       if (!__intel_breadcrumbs_wakeup(b))
+               __intel_engine_disarm_breadcrumbs(engine);
+       spin_unlock_irqrestore(&b->irq_lock, flags);
+       if (!b->irq_armed)
+               return;
+       mod_timer(&b->fake_irq, jiffies + 1);
+       /* Ensure that even if the GPU hangs, we get woken up.
+        *
+        * However, note that if no one is waiting, we never notice
+        * a gpu hang. Eventually, we will have to wait for a resource
+        * held by the GPU and so trigger a hangcheck. In the most
+        * pathological case, this will be upon memory starvation! To
+        * prevent this, we also queue the hangcheck from the retire
+        * worker.
+        */
+       i915_queue_hangcheck(engine->i915);
  }
  
  static void irq_enable(struct intel_engine_cs *engine)
         * we still need to force the barrier before reading the seqno,
         * just in case.
         */
-       engine->breadcrumbs.irq_posted = true;
+       set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
  
        /* Caller disables interrupts */
        spin_lock(&engine->i915->irq_lock);
@@@ -96,61 -160,114 +161,114 @@@ static void irq_disable(struct intel_en
        spin_lock(&engine->i915->irq_lock);
        engine->irq_disable(engine);
        spin_unlock(&engine->i915->irq_lock);
+ }
+ 
+ void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
+ {
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ 
+       lockdep_assert_held(&b->irq_lock);
  
-       engine->breadcrumbs.irq_posted = false;
+       if (b->irq_enabled) {
+               irq_disable(engine);
+               b->irq_enabled = false;
+       }
+       b->irq_armed = false;
  }
  
- static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
+ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
  {
-       struct intel_engine_cs *engine =
-               container_of(b, struct intel_engine_cs, breadcrumbs);
-       struct drm_i915_private *i915 = engine->i915;
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       unsigned long flags;
  
-       assert_spin_locked(&b->lock);
-       if (b->rpm_wakelock)
+       if (!b->irq_armed)
                return;
  
-       /* Since we are waiting on a request, the GPU should be busy
-        * and should have its own rpm reference. For completeness,
-        * record an rpm reference for ourselves to cover the
-        * interrupt we unmask.
+       spin_lock_irqsave(&b->irq_lock, flags);
+       /* We only disarm the irq when we are idle (all requests completed),
+        * so if there remains a sleeping waiter, it missed the request
+        * completion.
         */
-       intel_runtime_pm_get_noresume(i915);
-       b->rpm_wakelock = true;
+       if (__intel_breadcrumbs_wakeup(b) & ENGINE_WAKEUP_ASLEEP)
+               missed_breadcrumb(engine);
  
-       /* No interrupts? Kick the waiter every jiffie! */
-       if (intel_irqs_enabled(i915)) {
-               if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
-                       irq_enable(engine);
-               b->irq_enabled = true;
-       }
+       __intel_engine_disarm_breadcrumbs(engine);
+       spin_unlock_irqrestore(&b->irq_lock, flags);
+ }
+ static bool use_fake_irq(const struct intel_breadcrumbs *b)
+ {
+       const struct intel_engine_cs *engine =
+               container_of(b, struct intel_engine_cs, breadcrumbs);
+       if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
+               return false;
  
-       if (!b->irq_enabled ||
-           test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
+       /* Only start with the heavyweight fake irq timer if we have not
+        * seen any interrupts since enabling it the first time. If the
+        * interrupts are still arriving, it means we made a mistake in our
+        * engine->seqno_barrier(), a timing error that should be transient
+        * and unlikely to recur.
+        */
+       return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
+ }
+ static void enable_fake_irq(struct intel_breadcrumbs *b)
+ {
+       /* Ensure we never sleep indefinitely */
+       if (!b->irq_enabled || use_fake_irq(b))
                mod_timer(&b->fake_irq, jiffies + 1);
-       } else {
-               /* Ensure we never sleep indefinitely */
-               GEM_BUG_ON(!time_after(b->timeout, jiffies));
-               mod_timer(&b->hangcheck, b->timeout);
-       }
+       else
+               mod_timer(&b->hangcheck, wait_timeout());
  }
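/*
 * A sketch of the heuristic behind use_fake_irq() above: a snapshot of the
 * engine's interrupt counter is kept (b->hangcheck_interrupts); if the live
 * counter has not advanced since that snapshot, real interrupts have stopped
 * arriving and the jiffy-resolution fake irq takes over. Field names below
 * are illustrative.
 */
#include <linux/atomic.h>

struct irq_sample_sketch {
	atomic_t irq_count;	/* incremented by the real irq handler */
	unsigned int snapshot;	/* value recorded when the timer was armed */
};

static bool irqs_stalled_sketch(const struct irq_sample_sketch *s)
{
	/* equal counters => no user interrupt since the snapshot */
	return atomic_read(&s->irq_count) == s->snapshot;
}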
  
- static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
+ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
  {
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);
+       struct drm_i915_private *i915 = engine->i915;
  
-       assert_spin_locked(&b->lock);
-       if (!b->rpm_wakelock)
+       lockdep_assert_held(&b->irq_lock);
+       if (b->irq_armed)
                return;
  
-       if (b->irq_enabled) {
-               irq_disable(engine);
-               b->irq_enabled = false;
+       /* The breadcrumb irq will be disarmed on the interrupt after the
+        * waiters are signaled. This gives us a single interrupt window in
+        * which we can add a new waiter and avoid the cost of re-enabling
+        * the irq.
+        */
+       b->irq_armed = true;
+       GEM_BUG_ON(b->irq_enabled);
+       if (I915_SELFTEST_ONLY(b->mock)) {
+               /* For our mock objects we want to avoid interaction
+                * with the real hardware (which is not set up). So
+                * we simply pretend we have enabled the powerwell
+                * and the irq, and leave it up to the mock
+                * implementation to call intel_engine_wakeup()
+                * itself when it wants to simulate a user interrupt.
+                */
+               return;
+       }
+       /* Since we are waiting on a request, the GPU should be busy
+        * and should have its own rpm reference, tracked by
+        * i915->gt.awake. We can therefore forgo holding our own wakeref
+        * for the interrupt: the breadcrumbs are disarmed before
+        * i915->gt.awake is released (when the driver is idle).
+        */
+       /* No interrupts? Kick the waiter every jiffie! */
+       if (intel_irqs_enabled(i915)) {
+               if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
+                       irq_enable(engine);
+               b->irq_enabled = true;
        }
  
-       intel_runtime_pm_put(engine->i915);
-       b->rpm_wakelock = false;
+       enable_fake_irq(b);
  }
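/*
 * The I915_SELFTEST_ONLY() guard above compiles away outside selftest
 * builds, so the b->mock check costs nothing in production. Its approximate
 * shape (see i915_selftest.h for the authoritative macro) is:
 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#define I915_SELFTEST_ONLY(x) unlikely(x)
#else
#define I915_SELFTEST_ONLY(x) 0
#endif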
  
  static inline struct intel_wait *to_wait(struct rb_node *node)
  static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
                                              struct intel_wait *wait)
  {
-       assert_spin_locked(&b->lock);
+       lockdep_assert_held(&b->rb_lock);
  
        /* This request is completed, so remove it from the tree, mark it as
         * complete, and *then* wake up the associated task.
        wake_up_process(wait->tsk); /* implicit smp_wmb() */
  }
  
+ static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
+                                           struct rb_node *next)
+ {
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       spin_lock(&b->irq_lock);
+       GEM_BUG_ON(!b->irq_armed);
+       b->irq_wait = to_wait(next);
+       spin_unlock(&b->irq_lock);
+       /* We always wake up the next waiter that takes over as the bottom-half
+        * as we may delegate not only the irq-seqno barrier to the next waiter
+        * but also the task of waking up concurrent waiters.
+        */
+       if (next)
+               wake_up_process(to_wait(next)->tsk);
+ }
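/*
 * A sketch of the lock split this series introduces: the old single b->lock
 * becomes two spinlocks with distinct scopes, so the hardirq path only ever
 * contends on the small irq_lock while rbtree manipulation stays under
 * rb_lock. Illustrative layout only; the real struct intel_breadcrumbs
 * carries more state than shown.
 */
#include <linux/spinlock.h>
#include <linux/rbtree.h>

struct intel_wait;

struct breadcrumbs_sketch {
	spinlock_t irq_lock;		/* guards irq_wait, irq_armed, irq_enabled */
	struct intel_wait *irq_wait;	/* oldest waiter, read from the irq handler */

	spinlock_t rb_lock;		/* guards the waiter and signal rbtrees */
	struct rb_root waiters;
	struct rb_root signals;
};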
  static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
                                    struct intel_wait *wait)
  {
        }
        rb_link_node(&wait->node, parent, p);
        rb_insert_color(&wait->node, &b->waiters);
-       GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));
  
        if (completed) {
                struct rb_node *next = rb_next(completed);
                GEM_BUG_ON(!next && !first);
                if (next && next != &wait->node) {
                        GEM_BUG_ON(first);
-                       b->timeout = wait_timeout();
-                       b->first_wait = to_wait(next);
-                       rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
-                       /* As there is a delay between reading the current
-                        * seqno, processing the completed tasks and selecting
-                        * the next waiter, we may have missed the interrupt
-                        * and so need for the next bottom-half to wakeup.
-                        *
-                        * Also as we enable the IRQ, we may miss the
-                        * interrupt for that seqno, so we have to wake up
-                        * the next bottom-half in order to do a coherent check
-                        * in case the seqno passed.
-                        */
-                       __intel_breadcrumbs_enable_irq(b);
-                       if (READ_ONCE(b->irq_posted))
-                               wake_up_process(to_wait(next)->tsk);
+                       __intel_breadcrumbs_next(engine, next);
                }
  
                do {
        }
  
        if (first) {
+               spin_lock(&b->irq_lock);
                GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
-               b->timeout = wait_timeout();
-               b->first_wait = wait;
-               rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
+               b->irq_wait = wait;
                /* After assigning ourselves as the new bottom-half, we must
                 * perform a cursory check to prevent a missed interrupt.
                 * Either we miss the interrupt whilst programming the hardware,
                 * or a concurrent waiter may be woken in our stead, and so we
                 * miss the wake up.
                 */
                __intel_breadcrumbs_enable_irq(b);
+               spin_unlock(&b->irq_lock);
        }
-       GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
-       GEM_BUG_ON(!b->first_wait);
-       GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
+       GEM_BUG_ON(!b->irq_wait);
+       GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
  
        return first;
  }
@@@ -296,9 -414,9 +415,9 @@@ bool intel_engine_add_wait(struct intel
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool first;
  
-       spin_lock_irq(&b->lock);
+       spin_lock_irq(&b->rb_lock);
        first = __intel_engine_add_wait(engine, wait);
-       spin_unlock_irq(&b->lock);
+       spin_unlock_irq(&b->rb_lock);
  
        return first;
  }
@@@ -317,29 -435,20 +436,20 @@@ static inline int wakeup_priority(struc
                return tsk->prio;
  }
  
- void intel_engine_remove_wait(struct intel_engine_cs *engine,
-                             struct intel_wait *wait)
+ static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
+                                      struct intel_wait *wait)
  {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
  
-       /* Quick check to see if this waiter was already decoupled from
-        * the tree by the bottom-half to avoid contention on the spinlock
-        * by the herd.
-        */
-       if (RB_EMPTY_NODE(&wait->node))
-               return;
-       spin_lock_irq(&b->lock);
+       lockdep_assert_held(&b->rb_lock);
  
        if (RB_EMPTY_NODE(&wait->node))
-               goto out_unlock;
+               goto out;
  
-       if (b->first_wait == wait) {
+       if (b->irq_wait == wait) {
                const int priority = wakeup_priority(b, wait->tsk);
                struct rb_node *next;
  
-               GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);
                /* We are the current bottom-half. Find the next candidate,
                 * the first waiter in the queue on the remaining oldest
                 * request. As multiple seqnos may complete in the time it
                        }
                }
  
-               if (next) {
-                       /* In our haste, we may have completed the first waiter
-                        * before we enabled the interrupt. Do so now as we
-                        * have a second waiter for a future seqno. Afterwards,
-                        * we have to wake up that waiter in case we missed
-                        * the interrupt, or if we have to handle an
-                        * exception rather than a seqno completion.
-                        */
-                       b->timeout = wait_timeout();
-                       b->first_wait = to_wait(next);
-                       rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
-                       if (b->first_wait->seqno != wait->seqno)
-                               __intel_breadcrumbs_enable_irq(b);
-                       wake_up_process(b->first_wait->tsk);
-               } else {
-                       b->first_wait = NULL;
-                       rcu_assign_pointer(b->irq_seqno_bh, NULL);
-                       __intel_breadcrumbs_disable_irq(b);
-               }
+               __intel_breadcrumbs_next(engine, next);
        } else {
                GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
        }
        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);
  
- out_unlock:
-       GEM_BUG_ON(b->first_wait == wait);
+ out:
+       GEM_BUG_ON(b->irq_wait == wait);
        GEM_BUG_ON(rb_first(&b->waiters) !=
-                  (b->first_wait ? &b->first_wait->node : NULL));
-       GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
-       spin_unlock_irq(&b->lock);
+                  (b->irq_wait ? &b->irq_wait->node : NULL));
+ }
+ void intel_engine_remove_wait(struct intel_engine_cs *engine,
+                             struct intel_wait *wait)
+ {
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       /* Quick check to see if this waiter was already decoupled from
+        * the tree by the bottom-half to avoid contention on the spinlock
+        * by the herd.
+        */
+       if (RB_EMPTY_NODE(&wait->node))
+               return;
+       spin_lock_irq(&b->rb_lock);
+       __intel_engine_remove_wait(engine, wait);
+       spin_unlock_irq(&b->rb_lock);
+ }
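/*
 * A sketch of the double-checked detach in intel_engine_remove_wait() above:
 * peek at RB_EMPTY_NODE() without the lock so waiters the bottom-half already
 * removed skip the spinlock, then re-check under the lock because the peek
 * can race. Only the rbtree and spinlock calls are real kernel API; the rest
 * is illustrative.
 */
#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct wait_tree_sketch {
	spinlock_t lock;
	struct rb_root root;
};

static void remove_wait_sketch(struct wait_tree_sketch *t, struct rb_node *node)
{
	if (RB_EMPTY_NODE(node))	/* unlocked peek: already detached? */
		return;

	spin_lock_irq(&t->lock);
	if (!RB_EMPTY_NODE(node)) {	/* re-check now that we own the tree */
		rb_erase(node, &t->root);
		RB_CLEAR_NODE(node);	/* mark detached for future peeks */
	}
	spin_unlock_irq(&t->lock);
}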
+ static bool signal_valid(const struct drm_i915_gem_request *request)
+ {
+       return intel_wait_check_request(&request->signaling.wait, request);
  }
  
- static bool signal_complete(struct drm_i915_gem_request *request)
+ static bool signal_complete(const struct drm_i915_gem_request *request)
  {
        if (!request)
                return false;
         * signalled that this wait is already completed.
         */
        if (intel_wait_complete(&request->signaling.wait))
-               return true;
+               return signal_valid(request);
  
        /* Carefully check if the request is complete, giving time for the
         * seqno to become visible, or to detect that the GPU hung.
@@@ -458,40 -569,62 +570,62 @@@ static int intel_breadcrumbs_signaler(v
                 * need to wait for a new interrupt from the GPU or for
                 * a new client.
                 */
-               request = READ_ONCE(b->first_signal);
+               rcu_read_lock();
+               request = rcu_dereference(b->first_signal);
+               if (request)
+                       request = i915_gem_request_get_rcu(request);
+               rcu_read_unlock();
                if (signal_complete(request)) {
-                       /* Wake up all other completed waiters and select the
-                        * next bottom-half for the next user interrupt.
-                        */
-                       intel_engine_remove_wait(engine,
-                                                &request->signaling.wait);
                        local_bh_disable();
                        dma_fence_signal(&request->fence);
                        local_bh_enable(); /* kick start the tasklets */
  
+                       spin_lock_irq(&b->rb_lock);
+                       /* Wake up all other completed waiters and select the
+                        * next bottom-half for the next user interrupt.
+                        */
+                       __intel_engine_remove_wait(engine,
+                                                  &request->signaling.wait);
                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
                         * have installed an even older signal than the one
                         * we just completed - so double check we are still
                         * the oldest before picking the next one.
                         */
-                       spin_lock_irq(&b->lock);
-                       if (request == b->first_signal) {
+                       if (request == rcu_access_pointer(b->first_signal)) {
                                struct rb_node *rb =
                                        rb_next(&request->signaling.node);
-                               b->first_signal = rb ? to_signaler(rb) : NULL;
+                               rcu_assign_pointer(b->first_signal,
+                                                  rb ? to_signaler(rb) : NULL);
                        }
                        rb_erase(&request->signaling.node, &b->signals);
-                       spin_unlock_irq(&b->lock);
+                       RB_CLEAR_NODE(&request->signaling.node);
+                       spin_unlock_irq(&b->rb_lock);
  
                        i915_gem_request_put(request);
                } else {
-                       if (kthread_should_stop())
+                       DEFINE_WAIT(exec);
+                       if (kthread_should_stop()) {
+                               GEM_BUG_ON(request);
                                break;
+                       }
+                       if (request)
+                               add_wait_queue(&request->execute, &exec);
  
                        schedule();
+                       if (request)
+                               remove_wait_queue(&request->execute, &exec);
+                       if (kthread_should_park())
+                               kthread_parkme();
                }
+               i915_gem_request_put(request);
        } while (1);
        __set_current_state(TASK_RUNNING);
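/*
 * A sketch of the RCU lookup the signaler now performs: dereference the
 * RCU-protected first_signal and convert it into a real reference before
 * leaving the read-side section. kref_get_unless_zero() plays the role of
 * i915_gem_request_get_rcu() here, refusing a request whose refcount has
 * already dropped to zero. Types and field names are illustrative.
 */
#include <linux/rcupdate.h>
#include <linux/kref.h>

struct req_sketch {
	struct kref ref;
};

struct signals_sketch {
	struct req_sketch __rcu *first_signal;
};

static struct req_sketch *first_signal_get_sketch(struct signals_sketch *b)
{
	struct req_sketch *rq;

	rcu_read_lock();
	rq = rcu_dereference(b->first_signal);
	if (rq && !kref_get_unless_zero(&rq->ref))
		rq = NULL;		/* raced with the final put */
	rcu_read_unlock();

	return rq;			/* caller owns a reference, or NULL */
}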
  
@@@ -504,6 -637,7 +638,7 @@@ void intel_engine_enable_signaling(stru
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node *parent, **p;
        bool first, wakeup;
+       u32 seqno;
  
        /* Note that we may be called from an interrupt handler on another
         * device (e.g. nouveau signaling a fence completion causing us
         * to submit a request, and so enable signaling).
         */
  
        /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
-       assert_spin_locked(&request->lock);
-       if (!request->global_seqno)
+       GEM_BUG_ON(!irqs_disabled());
+       lockdep_assert_held(&request->lock);
+       seqno = i915_gem_request_global_seqno(request);
+       if (!seqno)
                return;
  
        request->signaling.wait.tsk = b->signaler;
-       request->signaling.wait.seqno = request->global_seqno;
+       request->signaling.wait.request = request;
+       request->signaling.wait.seqno = seqno;
        i915_gem_request_get(request);
  
-       spin_lock(&b->lock);
+       spin_lock(&b->rb_lock);
  
        /* First add ourselves into the list of waiters, but register our
         * bottom-half as the signaller thread. As per usual, only the oldest
        p = &b->signals.rb_node;
        while (*p) {
                parent = *p;
-               if (i915_seqno_passed(request->global_seqno,
-                                     to_signaler(parent)->global_seqno)) {
+               if (i915_seqno_passed(seqno,
+                                     to_signaler(parent)->signaling.wait.seqno)) {
                        p = &parent->rb_right;
                        first = false;
                } else {
        rb_link_node(&request->signaling.node, parent, p);
        rb_insert_color(&request->signaling.node, &b->signals);
        if (first)
-               smp_store_mb(b->first_signal, request);
+               rcu_assign_pointer(b->first_signal, request);
  
-       spin_unlock(&b->lock);
+       spin_unlock(&b->rb_lock);
  
        if (wakeup)
                wake_up_process(b->signaler);
  }
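/*
 * A sketch of the seqno-ordered rbtree insert performed above. The key point
 * is that the comparison must be wraparound-safe, which i915_seqno_passed()
 * provides via the signed-subtraction idiom below; everything else here is
 * illustrative.
 */
#include <linux/rbtree.h>
#include <linux/types.h>

struct signal_sketch {
	struct rb_node node;
	u32 seqno;
};

static inline bool seqno_passed_sketch(u32 a, u32 b)
{
	return (s32)(a - b) >= 0;	/* "a is at or after b", modulo 2^32 */
}

static void insert_signal_sketch(struct rb_root *root, struct signal_sketch *sig)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		if (seqno_passed_sketch(sig->seqno,
					rb_entry(parent, struct signal_sketch, node)->seqno))
			p = &parent->rb_right;	/* later seqnos sort right */
		else
			p = &parent->rb_left;
	}
	rb_link_node(&sig->node, parent, p);
	rb_insert_color(&sig->node, root);
}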
  
+ void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
+ {
+       struct intel_engine_cs *engine = request->engine;
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       GEM_BUG_ON(!irqs_disabled());
+       lockdep_assert_held(&request->lock);
+       GEM_BUG_ON(!request->signaling.wait.seqno);
+       spin_lock(&b->rb_lock);
+       if (!RB_EMPTY_NODE(&request->signaling.node)) {
+               if (request == rcu_access_pointer(b->first_signal)) {
+                       struct rb_node *rb =
+                               rb_next(&request->signaling.node);
+                       rcu_assign_pointer(b->first_signal,
+                                          rb ? to_signaler(rb) : NULL);
+               }
+               rb_erase(&request->signaling.node, &b->signals);
+               RB_CLEAR_NODE(&request->signaling.node);
+               i915_gem_request_put(request);
+       }
+       __intel_engine_remove_wait(engine, &request->signaling.wait);
+       spin_unlock(&b->rb_lock);
+       request->signaling.wait.seqno = 0;
+ }
  int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
  {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct task_struct *tsk;
  
-       spin_lock_init(&b->lock);
+       spin_lock_init(&b->rb_lock);
+       spin_lock_init(&b->irq_lock);
        setup_timer(&b->fake_irq,
                    intel_breadcrumbs_fake_irq,
                    (unsigned long)engine);
@@@ -604,20 -774,26 +775,26 @@@ void intel_engine_reset_breadcrumbs(str
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
  
        cancel_fake_irq(engine);
-       spin_lock_irq(&b->lock);
+       spin_lock_irq(&b->irq_lock);
  
-       __intel_breadcrumbs_disable_irq(b);
-       if (intel_engine_has_waiter(engine)) {
-               b->timeout = wait_timeout();
-               __intel_breadcrumbs_enable_irq(b);
-               if (READ_ONCE(b->irq_posted))
-                       wake_up_process(b->first_wait->tsk);
-       } else {
-               /* sanitize the IMR and unmask any auxiliary interrupts */
+       if (b->irq_enabled)
+               irq_enable(engine);
+       else
                irq_disable(engine);
-       }
  
-       spin_unlock_irq(&b->lock);
+       /* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
+        * GPU is active and may have already executed the MI_USER_INTERRUPT
+        * before the CPU is ready to receive. However, the engine is currently
+        * idle (we haven't started it yet), so there is no possibility for a
+        * missed interrupt as we enabled the irq and so we can clear the
+        * immediate wakeup (until a real interrupt arrives for the waiter).
+        */
+       clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+       if (b->irq_armed)
+               enable_fake_irq(b);
+       spin_unlock_irq(&b->irq_lock);
  }
  
  void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
  
        /* The engines should be idle and all requests accounted for! */
-       WARN_ON(READ_ONCE(b->first_wait));
+       WARN_ON(READ_ONCE(b->irq_wait));
        WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
-       WARN_ON(READ_ONCE(b->first_signal));
+       WARN_ON(rcu_access_pointer(b->first_signal));
        WARN_ON(!RB_EMPTY_ROOT(&b->signals));
  
        if (!IS_ERR_OR_NULL(b->signaler))
        cancel_fake_irq(engine);
  }
  
- unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
+ bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
  {
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       unsigned int mask = 0;
-       for_each_engine(engine, i915, id) {
-               struct intel_breadcrumbs *b = &engine->breadcrumbs;
-               spin_lock_irq(&b->lock);
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       bool busy = false;
  
-               if (b->first_wait) {
-                       wake_up_process(b->first_wait->tsk);
-                       mask |= intel_engine_flag(engine);
-               }
+       spin_lock_irq(&b->rb_lock);
  
-               if (b->first_signal) {
-                       wake_up_process(b->signaler);
-                       mask |= intel_engine_flag(engine);
-               }
+       if (b->irq_wait) {
+               wake_up_process(b->irq_wait->tsk);
+               busy |= intel_engine_flag(engine);
+       }
  
-               spin_unlock_irq(&b->lock);
+       if (rcu_access_pointer(b->first_signal)) {
+               wake_up_process(b->signaler);
+               busy |= intel_engine_flag(engine);
        }
  
-       return mask;
+       spin_unlock_irq(&b->rb_lock);
+       return busy;
  }
+ #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ #include "selftests/intel_breadcrumbs.c"
+ #endif
index a2fece5e9fb38c80051ba6cea8be8af790c9e94b,72e2c91707d48373d1c931ffdf76c4bcb49ca4d2..7369ee31ad914ac8726413bc3d1cec755ae08c2b
@@@ -37,6 -37,7 +37,7 @@@
  #include "intel_frontbuffer.h"
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
+ #include "i915_gem_clflush.h"
  #include "intel_dsi.h"
  #include "i915_trace.h"
  #include <drm/drm_atomic.h>
@@@ -96,10 -97,9 +97,9 @@@ static void i9xx_crtc_clock_get(struct 
  static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config);
  
- static int intel_framebuffer_init(struct drm_device *dev,
-                                 struct intel_framebuffer *ifb,
-                                 struct drm_mode_fb_cmd2 *mode_cmd,
-                                 struct drm_i915_gem_object *obj);
+ static int intel_framebuffer_init(struct intel_framebuffer *ifb,
+                                 struct drm_i915_gem_object *obj,
+                                 struct drm_mode_fb_cmd2 *mode_cmd);
  static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
  static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
  static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
@@@ -122,9 -122,6 +122,6 @@@ static void ironlake_pfit_disable(struc
  static void ironlake_pfit_enable(struct intel_crtc *crtc);
  static void intel_modeset_setup_hw_state(struct drm_device *dev);
  static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
- static int ilk_max_pixel_rate(struct drm_atomic_state *state);
- static int glk_calc_cdclk(int max_pixclk);
- static int bxt_calc_cdclk(int max_pixclk);
  
  struct intel_limit {
        struct {
  };
  
  /* returns HPLL frequency in kHz */
- static int valleyview_get_vco(struct drm_i915_private *dev_priv)
+ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
  {
        int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
  
@@@ -170,73 -167,16 +167,16 @@@ int vlv_get_cck_clock(struct drm_i915_p
        return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
  }
  
- static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
-                                 const char *name, u32 reg)
+ int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+                          const char *name, u32 reg)
  {
        if (dev_priv->hpll_freq == 0)
-               dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
+               dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
  
        return vlv_get_cck_clock(dev_priv, name, reg,
                                 dev_priv->hpll_freq);
  }
  
- static int
- intel_pch_rawclk(struct drm_i915_private *dev_priv)
- {
-       return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
- }
- static int
- intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
- {
-       /* RAWCLK_FREQ_VLV register updated from power well code */
-       return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
-                                     CCK_DISPLAY_REF_CLOCK_CONTROL);
- }
- static int
- intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
- {
-       uint32_t clkcfg;
-       /* hrawclock is 1/4 the FSB frequency */
-       clkcfg = I915_READ(CLKCFG);
-       switch (clkcfg & CLKCFG_FSB_MASK) {
-       case CLKCFG_FSB_400:
-               return 100000;
-       case CLKCFG_FSB_533:
-               return 133333;
-       case CLKCFG_FSB_667:
-               return 166667;
-       case CLKCFG_FSB_800:
-               return 200000;
-       case CLKCFG_FSB_1067:
-               return 266667;
-       case CLKCFG_FSB_1333:
-               return 333333;
-       /* these two are just a guess; one of them might be right */
-       case CLKCFG_FSB_1600:
-       case CLKCFG_FSB_1600_ALT:
-               return 400000;
-       default:
-               return 133333;
-       }
- }
- void intel_update_rawclk(struct drm_i915_private *dev_priv)
- {
-       if (HAS_PCH_SPLIT(dev_priv))
-               dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
-       else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
-               dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
-       else
-               return; /* no rawclk on other platforms, or no need to know it */
-       DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
- }
  static void intel_update_czclk(struct drm_i915_private *dev_priv)
  {
        if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
@@@ -2112,11 -2052,13 +2052,13 @@@ static void intel_tile_dims(const struc
  }
  
  unsigned int
- intel_fb_align_height(struct drm_device *dev, unsigned int height,
-                     uint32_t pixel_format, uint64_t fb_modifier)
+ intel_fb_align_height(struct drm_i915_private *dev_priv,
+                     unsigned int height,
+                     uint32_t pixel_format,
+                     uint64_t fb_modifier)
  {
        unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
-       unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
+       unsigned int tile_height = intel_tile_height(dev_priv, fb_modifier, cpp);
  
        return ALIGN(height, tile_height);
  }
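/*
 * The alignment above is the stock round-up-to-multiple idiom; for the
 * power-of-two tile heights used here the kernel's ALIGN() reduces to the
 * mask form below. Worked example: an 1081-line framebuffer with 8-line
 * tiles pads to 1088 lines, i.e. 136 whole tile rows. The _SKETCH name is
 * illustrative.
 */
#define ALIGN_SKETCH(x, a)	(((x) + (a) - 1) & ~((a) - 1))
/* ALIGN_SKETCH(1080, 8) == 1080, ALIGN_SKETCH(1081, 8) == 1088 */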
@@@ -2682,15 -2624,13 +2624,13 @@@ intel_alloc_initial_plane_obj(struct in
                return false;
  
        mutex_lock(&dev->struct_mutex);
        obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
                                                             base_aligned,
                                                             base_aligned,
                                                             size_aligned);
-       if (!obj) {
-               mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
+       if (!obj)
                return false;
-       }
  
        if (plane_config->tiling == I915_TILING_X)
                obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
        mode_cmd.modifier[0] = fb->modifier;
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
  
-       if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
-                                  &mode_cmd, obj)) {
+       if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
                goto out_unref_obj;
        }
  
-       mutex_unlock(&dev->struct_mutex);
  
        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
        return true;
  
  out_unref_obj:
        i915_gem_object_put(obj);
-       mutex_unlock(&dev->struct_mutex);
        return false;
  }
  
@@@ -2733,6 -2670,29 +2670,29 @@@ update_state_fb(struct drm_plane *plane
                drm_framebuffer_reference(plane->state->fb);
  }
  
+ static void
+ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
+                       struct intel_plane_state *plane_state,
+                       bool visible)
+ {
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       plane_state->base.visible = visible;
+       /* FIXME pre-g4x don't work like this */
+       if (visible) {
+               crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
+               crtc_state->active_planes |= BIT(plane->id);
+       } else {
+               crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
+               crtc_state->active_planes &= ~BIT(plane->id);
+       }
+       DRM_DEBUG_KMS("%s active planes 0x%x\n",
+                     crtc_state->base.crtc->name,
+                     crtc_state->active_planes);
+ }
  static void
  intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
-       plane_state->visible = false;
-       crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
+       intel_set_plane_visible(to_intel_crtc_state(crtc_state),
+                               to_intel_plane_state(plane_state),
+                               false);
        intel_pre_disable_primary_noatomic(&intel_crtc->base);
+       trace_intel_disable_plane(primary, intel_crtc);
        intel_plane->disable_plane(primary, &intel_crtc->base);
  
        return;
@@@ -2831,7 -2793,11 +2793,11 @@@ valid_fb
        drm_framebuffer_reference(fb);
        primary->fb = primary->state->fb = fb;
        primary->crtc = primary->state->crtc = &intel_crtc->base;
-       intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
+       intel_set_plane_visible(to_intel_crtc_state(crtc_state),
+                               to_intel_plane_state(plane_state),
+                               true);
        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &obj->frontbuffer_bits);
  }
@@@ -3386,13 -3352,22 +3352,22 @@@ static void skylake_update_primary_plan
        int dst_w = drm_rect_width(&plane_state->base.dst);
        int dst_h = drm_rect_height(&plane_state->base.dst);
  
-       plane_ctl = PLANE_CTL_ENABLE |
-                   PLANE_CTL_PIPE_GAMMA_ENABLE |
-                   PLANE_CTL_PIPE_CSC_ENABLE;
+       plane_ctl = PLANE_CTL_ENABLE;
+       if (IS_GEMINILAKE(dev_priv)) {
+               I915_WRITE(PLANE_COLOR_CTL(pipe, plane_id),
+                          PLANE_COLOR_PIPE_GAMMA_ENABLE |
+                          PLANE_COLOR_PIPE_CSC_ENABLE |
+                          PLANE_COLOR_PLANE_GAMMA_DISABLE);
+       } else {
+               plane_ctl |=
+                       PLANE_CTL_PIPE_GAMMA_ENABLE |
+                       PLANE_CTL_PIPE_CSC_ENABLE |
+                       PLANE_CTL_PLANE_GAMMA_DISABLE;
+       }
  
        plane_ctl |= skl_plane_ctl_format(fb->format->format);
        plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
-       plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
        plane_ctl |= skl_plane_ctl_rotation(rotation);
  
        /* Sizes are 0 based */
@@@ -3473,17 -3448,20 +3448,21 @@@ static void intel_update_primary_planes
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
  
-               if (plane_state->base.visible)
+               if (plane_state->base.visible) {
+                       trace_intel_update_plane(&plane->base,
+                                                to_intel_crtc(crtc));
                        plane->update_plane(&plane->base,
                                            to_intel_crtc_state(crtc->state),
                                            plane_state);
+               }
        }
  }
  
  static int
  __intel_display_resume(struct drm_device *dev,
 -                     struct drm_atomic_state *state)
 +                     struct drm_atomic_state *state,
 +                     struct drm_modeset_acquire_ctx *ctx)
  {
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        }
  
        /* ignore any reset values/BIOS leftovers in the WM registers */
-       to_intel_atomic_state(state)->skip_intermediate_wm = true;
+       if (!HAS_GMCH_DISPLAY(to_i915(dev)))
+               to_intel_atomic_state(state)->skip_intermediate_wm = true;
  
 -      ret = drm_atomic_commit(state);
 +      ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
  
        WARN_ON(ret == -EDEADLK);
        return ret;
@@@ -3597,7 -3576,7 +3577,7 @@@ void intel_finish_reset(struct drm_i915
                         */
                        intel_update_primary_planes(dev);
                } else {
 -                      ret = __intel_display_resume(dev, state);
 +                      ret = __intel_display_resume(dev, state, ctx);
                        if (ret)
                                DRM_ERROR("Restoring old state failed with %i\n", ret);
                }
                        dev_priv->display.hpd_irq_setup(dev_priv);
                spin_unlock_irq(&dev_priv->irq_lock);
  
 -              ret = __intel_display_resume(dev, state);
 +              ret = __intel_display_resume(dev, state, ctx);
                if (ret)
                        DRM_ERROR("Restoring old state failed with %i\n", ret);
  
@@@ -3701,12 -3680,11 +3681,11 @@@ static void intel_update_pipe_config(st
        }
  }
  
- static void intel_fdi_normal_train(struct drm_crtc *crtc)
+ static void intel_fdi_normal_train(struct intel_crtc *crtc)
  {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        i915_reg_t reg;
        u32 temp;
  
  }
  
  /* The FDI link training functions for ILK/Ibexpeak. */
- static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ static void ironlake_fdi_link_train(struct intel_crtc *crtc,
+                                   const struct intel_crtc_state *crtc_state)
  {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        i915_reg_t reg;
        u32 temp, tries;
  
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_DP_PORT_WIDTH_MASK;
-       temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+       temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);
@@@ -3845,12 -3823,12 +3824,12 @@@ static const int snb_b_fdi_train_param[
  };
  
  /* The FDI link training functions for SNB/Cougarpoint. */
- static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ static void gen6_fdi_link_train(struct intel_crtc *crtc,
+                               const struct intel_crtc_state *crtc_state)
  {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        i915_reg_t reg;
        u32 temp, i, retry;
  
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_DP_PORT_WIDTH_MASK;
-       temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+       temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  }
  
  /* Manual link training for Ivy Bridge A0 parts */
- static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
+                                     const struct intel_crtc_state *crtc_state)
  {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        i915_reg_t reg;
        u32 temp, i, j;
  
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_DP_PORT_WIDTH_MASK;
-               temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+               temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
                temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[j/2];
@@@ -4308,10 -4286,10 +4287,10 @@@ void lpt_disable_iclkip(struct drm_i915
  }
  
  /* Program iCLKIP clock to the desired frequency */
- static void lpt_program_iclkip(struct drm_crtc *crtc)
+ static void lpt_program_iclkip(struct intel_crtc *crtc)
  {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       int clock = crtc->config->base.adjusted_mode.crtc_clock;
        u32 divsel, phaseinc, auxdiv, phasedir = 0;
        u32 temp;
  
@@@ -4492,12 -4470,12 +4471,12 @@@ static void ivybridge_update_fdi_bc_bif
  
  /* Return which DP Port should be selected for Transcoder DP control */
  static enum port
- intel_trans_dp_port_sel(struct drm_crtc *crtc)
+ intel_trans_dp_port_sel(struct intel_crtc *crtc)
  {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *encoder;
  
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
+       for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
                if (encoder->type == INTEL_OUTPUT_DP ||
                    encoder->type == INTEL_OUTPUT_EDP)
                        return enc_to_dig_port(&encoder->base)->port;
   *   - DP transcoding bits
   *   - transcoder
   */
- static void ironlake_pch_enable(struct drm_crtc *crtc)
+ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
  {
-       struct drm_device *dev = crtc->dev;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        u32 temp;
  
        assert_pch_transcoder_disabled(dev_priv, pipe);
  
        if (IS_IVYBRIDGE(dev_priv))
-               ivybridge_update_fdi_bc_bifurcation(intel_crtc);
+               ivybridge_update_fdi_bc_bifurcation(crtc);
  
        /* Write the TU size bits before fdi link training, so that error
         * detection works. */
                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
  
        /* For PCH output, training FDI link */
-       dev_priv->display.fdi_link_train(crtc);
+       dev_priv->display.fdi_link_train(crtc, crtc_state);
  
        /* We need to program the right clock selection before writing the pixel
         * multiplier into the DPLL. */
                temp = I915_READ(PCH_DPLL_SEL);
                temp |= TRANS_DPLL_ENABLE(pipe);
                sel = TRANS_DPLLB_SEL(pipe);
-               if (intel_crtc->config->shared_dpll ==
+               if (crtc_state->shared_dpll ==
                    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
                        temp |= sel;
                else
         * Note that enable_shared_dpll tries to do the right thing, but
         * get_shared_dpll unconditionally resets the pll - we need that to have
         * the right LVDS enable sequence. */
-       intel_enable_shared_dpll(intel_crtc);
+       intel_enable_shared_dpll(crtc);
  
        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
-       ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
+       ironlake_pch_transcoder_set_timings(crtc, pipe);
  
        intel_fdi_normal_train(crtc);
  
        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev_priv) &&
-           intel_crtc_has_dp_encoder(intel_crtc->config)) {
+           intel_crtc_has_dp_encoder(crtc_state)) {
                const struct drm_display_mode *adjusted_mode =
-                       &intel_crtc->config->base.adjusted_mode;
+                       &crtc_state->base.adjusted_mode;
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                i915_reg_t reg = TRANS_DP_CTL(pipe);
                temp = I915_READ(reg);
        ironlake_enable_pch_transcoder(dev_priv, pipe);
  }
  
- static void lpt_pch_enable(struct drm_crtc *crtc)
+ static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
  {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  
        assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
  
        lpt_program_iclkip(crtc);
  
        /* Set transcoder timing. */
-       ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
+       ironlake_pch_transcoder_set_timings(crtc, PIPE_A);
  
        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
  }
@@@ -5026,8 -5003,6 +5004,6 @@@ static void intel_post_plane_update(str
  
        intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
  
-       crtc->wm.cxsr_allowed = true;
        if (pipe_config->update_wm_post && pipe_config->base.active)
                intel_update_watermarks(crtc);
  
@@@ -5074,22 -5049,18 +5050,18 @@@ static void intel_pre_plane_update(stru
                        intel_pre_disable_primary(&crtc->base);
        }
  
-       if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
-               crtc->wm.cxsr_allowed = false;
-               /*
-                * Vblank time updates from the shadow to live plane control register
-                * are blocked if the memory self-refresh mode is active at that
-                * moment. So to make sure the plane gets truly disabled, disable
-                * first the self-refresh mode. The self-refresh enable bit in turn
-                * will be checked/applied by the HW only at the next frame start
-                * event which is after the vblank start event, so we need to have a
-                * wait-for-vblank between disabling the plane and the pipe.
-                */
-               if (old_crtc_state->base.active &&
-                   intel_set_memory_cxsr(dev_priv, false))
-                       intel_wait_for_vblank(dev_priv, crtc->pipe);
-       }
+       /*
+        * Vblank time updates from the shadow to live plane control register
+        * are blocked if the memory self-refresh mode is active at that
+        * moment. So to make sure the plane gets truly disabled, first
+        * disable the self-refresh mode. The self-refresh enable bit in turn
+        * will be checked/applied by the HW only at the next frame start
+        * event which is after the vblank start event, so we need to have a
+        * wait-for-vblank between disabling the plane and the pipe.
+        */
+       if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
+           pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+               intel_wait_for_vblank(dev_priv, crtc->pipe);
  
        /*
         * IVB workaround: must disable low power watermarks for at least
@@@ -5344,7 -5315,7 +5316,7 @@@ static void ironlake_crtc_enable(struc
        intel_enable_pipe(intel_crtc);
  
        if (intel_crtc->config->has_pch_encoder)
-               ironlake_pch_enable(crtc);
+               ironlake_pch_enable(pipe_config);
  
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
@@@ -5426,10 -5397,10 +5398,10 @@@ static void haswell_crtc_enable(struct 
        intel_encoders_pre_enable(crtc, pipe_config, old_state);
  
        if (intel_crtc->config->has_pch_encoder)
-               dev_priv->display.fdi_link_train(crtc);
+               dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
  
        if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_enable_pipe_clock(intel_crtc);
+               intel_ddi_enable_pipe_clock(pipe_config);
  
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_pfit_enable(intel_crtc);
         */
        intel_color_load_luts(&pipe_config->base);
  
-       intel_ddi_set_pipe_settings(crtc);
+       intel_ddi_set_pipe_settings(pipe_config);
        if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_enable_transcoder_func(crtc);
+               intel_ddi_enable_transcoder_func(pipe_config);
  
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
                intel_enable_pipe(intel_crtc);
  
        if (intel_crtc->config->has_pch_encoder)
-               lpt_pch_enable(crtc);
+               lpt_pch_enable(pipe_config);
  
        if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
-               intel_ddi_set_vc_payload_alloc(crtc, true);
+               intel_ddi_set_vc_payload_alloc(pipe_config, true);
  
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
@@@ -5579,7 -5550,7 +5551,7 @@@ static void haswell_crtc_disable(struc
                intel_disable_pipe(intel_crtc);
  
        if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
-               intel_ddi_set_vc_payload_alloc(crtc, false);
+               intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
  
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
                ironlake_pfit_disable(intel_crtc, false);
  
        if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_disable_pipe_clock(intel_crtc);
+               intel_ddi_disable_pipe_clock(intel_crtc->config);
  
        intel_encoders_post_disable(crtc, old_crtc_state, old_state);
  
@@@ -5623,7 -5594,7 +5595,7 @@@ static void i9xx_pfit_enable(struct int
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
  }
  
- static enum intel_display_power_domain port_to_power_domain(enum port port)
+ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
  {
        switch (port) {
        case PORT_A:
        }
  }
  
- static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
- {
-       switch (port) {
-       case PORT_A:
-               return POWER_DOMAIN_AUX_A;
-       case PORT_B:
-               return POWER_DOMAIN_AUX_B;
-       case PORT_C:
-               return POWER_DOMAIN_AUX_C;
-       case PORT_D:
-               return POWER_DOMAIN_AUX_D;
-       case PORT_E:
-               /* FIXME: Check VBT for actual wiring of PORT E */
-               return POWER_DOMAIN_AUX_D;
-       default:
-               MISSING_CASE(port);
-               return POWER_DOMAIN_AUX_A;
-       }
- }
- enum intel_display_power_domain
- intel_display_port_power_domain(struct intel_encoder *intel_encoder)
- {
-       struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
-       struct intel_digital_port *intel_dig_port;
-       switch (intel_encoder->type) {
-       case INTEL_OUTPUT_UNKNOWN:
-               /* Only DDI platforms should ever use this output type */
-               WARN_ON_ONCE(!HAS_DDI(dev_priv));
-       case INTEL_OUTPUT_DP:
-       case INTEL_OUTPUT_HDMI:
-       case INTEL_OUTPUT_EDP:
-               intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-               return port_to_power_domain(intel_dig_port->port);
-       case INTEL_OUTPUT_DP_MST:
-               intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
-               return port_to_power_domain(intel_dig_port->port);
-       case INTEL_OUTPUT_ANALOG:
-               return POWER_DOMAIN_PORT_CRT;
-       case INTEL_OUTPUT_DSI:
-               return POWER_DOMAIN_PORT_DSI;
-       default:
-               return POWER_DOMAIN_PORT_OTHER;
-       }
- }
- enum intel_display_power_domain
- intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
- {
-       struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
-       struct intel_digital_port *intel_dig_port;
-       switch (intel_encoder->type) {
-       case INTEL_OUTPUT_UNKNOWN:
-       case INTEL_OUTPUT_HDMI:
-               /*
-                * Only DDI platforms should ever use these output types.
-                * We can get here after the HDMI detect code has already set
-                * the type of the shared encoder. Since we can't be sure
-                * what's the status of the given connectors, play safe and
-                * run the DP detection too.
-                */
-               WARN_ON_ONCE(!HAS_DDI(dev_priv));
-       case INTEL_OUTPUT_DP:
-       case INTEL_OUTPUT_EDP:
-               intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-               return port_to_aux_power_domain(intel_dig_port->port);
-       case INTEL_OUTPUT_DP_MST:
-               intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
-               return port_to_aux_power_domain(intel_dig_port->port);
-       default:
-               MISSING_CASE(intel_encoder->type);
-               return POWER_DOMAIN_AUX_A;
-       }
- }
- static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
-                                           struct intel_crtc_state *crtc_state)
+ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state)
  {
        struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
-       unsigned long mask;
+       u64 mask;
        enum transcoder transcoder = crtc_state->cpu_transcoder;
  
        if (!crtc_state->base.active)
        mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
        if (crtc_state->pch_pfit.enabled ||
            crtc_state->pch_pfit.force_thru)
-               mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+               mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
  
        drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
  
-               mask |= BIT(intel_display_port_power_domain(intel_encoder));
+               mask |= BIT_ULL(intel_encoder->power_domain);
        }
  
+       if (HAS_DDI(dev_priv) && crtc_state->has_audio)
+               mask |= BIT(POWER_DOMAIN_AUDIO);
        if (crtc_state->shared_dpll)
-               mask |= BIT(POWER_DOMAIN_PLLS);
+               mask |= BIT_ULL(POWER_DOMAIN_PLLS);
  
        return mask;
  }
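/*
 * Why the mask above switched from unsigned long to u64: on 32-bit kernels
 * unsigned long is 32 bits, so BIT(n) is undefined for power domains
 * numbered 32 and up, which this series adds. BIT_ULL() always shifts a
 * 64-bit one. Definitions as in the kernel headers (_SKETCH names are
 * illustrative):
 */
#define BIT_SKETCH(nr)		(1UL << (nr))	/* BIT(): overflows for nr >= 32 on 32-bit */
#define BIT_ULL_SKETCH(nr)	(1ULL << (nr))	/* BIT_ULL(): always 64-bit safe */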
  
- static unsigned long
+ static u64
  modeset_get_crtc_power_domains(struct drm_crtc *crtc,
                               struct intel_crtc_state *crtc_state)
  {
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum intel_display_power_domain domain;
-       unsigned long domains, new_domains, old_domains;
+       u64 domains, new_domains, old_domains;
  
        old_domains = intel_crtc->enabled_power_domains;
        intel_crtc->enabled_power_domains = new_domains =
  }
  
  static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
-                                     unsigned long domains)
+                                     u64 domains)
  {
        enum intel_display_power_domain domain;
  
                intel_display_power_put(dev_priv, domain);
  }
  
- static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
+ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
+                                  struct drm_atomic_state *old_state)
  {
-       int max_cdclk_freq = dev_priv->max_cdclk_freq;
+       struct intel_atomic_state *old_intel_state =
+               to_intel_atomic_state(old_state);
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
  
-       if (IS_GEMINILAKE(dev_priv))
-               return 2 * max_cdclk_freq;
-       else if (INTEL_INFO(dev_priv)->gen >= 9 ||
-                IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               return max_cdclk_freq;
-       else if (IS_CHERRYVIEW(dev_priv))
-               return max_cdclk_freq*95/100;
-       else if (INTEL_INFO(dev_priv)->gen < 4)
-               return 2*max_cdclk_freq*90/100;
-       else
-               return max_cdclk_freq*90/100;
- }
+       if (WARN_ON(intel_crtc->active))
+               return;
  
- static int skl_calc_cdclk(int max_pixclk, int vco);
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
+               intel_dp_set_m_n(intel_crtc, M1_N1);
  
- static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
- {
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-               u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
-               int max_cdclk, vco;
+       intel_set_pipe_timings(intel_crtc);
+       intel_set_pipe_src_size(intel_crtc);
  
-               vco = dev_priv->skl_preferred_vco_freq;
-               WARN_ON(vco != 8100000 && vco != 8640000);
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+               struct drm_i915_private *dev_priv = to_i915(dev);
  
-               /*
-                * Use the lower (vco 8640) cdclk values as a
-                * first guess. skl_calc_cdclk() will correct it
-                * if the preferred vco is 8100 instead.
-                */
-               if (limit == SKL_DFSM_CDCLK_LIMIT_675)
-                       max_cdclk = 617143;
-               else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
-                       max_cdclk = 540000;
-               else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
-                       max_cdclk = 432000;
-               else
-                       max_cdclk = 308571;
-               dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
-       } else if (IS_GEMINILAKE(dev_priv)) {
-               dev_priv->max_cdclk_freq = 316800;
-       } else if (IS_BROXTON(dev_priv)) {
-               dev_priv->max_cdclk_freq = 624000;
-       } else if (IS_BROADWELL(dev_priv))  {
-               /*
-                * FIXME with extra cooling we can allow
-                * 540 MHz for ULX and 675 MHz for ULT.
-                * How can we know if extra cooling is
-                * available? PCI ID, VTB, something else?
-                */
-               if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
-                       dev_priv->max_cdclk_freq = 450000;
-               else if (IS_BDW_ULX(dev_priv))
-                       dev_priv->max_cdclk_freq = 450000;
-               else if (IS_BDW_ULT(dev_priv))
-                       dev_priv->max_cdclk_freq = 540000;
-               else
-                       dev_priv->max_cdclk_freq = 675000;
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               dev_priv->max_cdclk_freq = 320000;
-       } else if (IS_VALLEYVIEW(dev_priv)) {
-               dev_priv->max_cdclk_freq = 400000;
-       } else {
-               /* otherwise assume cdclk is fixed */
-               dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
+               I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+               I915_WRITE(CHV_CANVAS(pipe), 0);
        }
  
-       dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+       i9xx_set_pipeconf(intel_crtc);
  
-       DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
-                        dev_priv->max_cdclk_freq);
+       intel_crtc->active = true;
  
-       DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
-                        dev_priv->max_dotclk_freq);
- }
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  
- static void intel_update_cdclk(struct drm_i915_private *dev_priv)
- {
-       dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv);
+       intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
  
-       if (INTEL_GEN(dev_priv) >= 9)
-               DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
-                                dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
-                                dev_priv->cdclk_pll.ref);
-       else
-               DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
-                                dev_priv->cdclk_freq);
+       if (IS_CHERRYVIEW(dev_priv)) {
+               chv_prepare_pll(intel_crtc, intel_crtc->config);
+               chv_enable_pll(intel_crtc, intel_crtc->config);
+       } else {
+               vlv_prepare_pll(intel_crtc, intel_crtc->config);
+               vlv_enable_pll(intel_crtc, intel_crtc->config);
+       }
  
-       /*
-        * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
-        * Programmng [sic] note: bit[9:2] should be programmed to the number
-        * of cdclk that generates 4MHz reference clock freq which is used to
-        * generate GMBus clock. This will vary with the cdclk freq.
-        */
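-       /* e.g. cdclk_freq 400000 kHz -> DIV_ROUND_UP(400000, 1000) = 400 */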
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
- }
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
  
- /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
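- /* e.g. 337500 kHz -> (337500 - 1000) / 500 = 673, i.e. 336.5 MHz in 0.5 MHz units */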
- static int skl_cdclk_decimal(int cdclk)
- {
-       return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
- }
+       i9xx_pfit_enable(intel_crtc);
  
- static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
- {
-       int ratio;
+       intel_color_load_luts(&pipe_config->base);
  
-       if (cdclk == dev_priv->cdclk_pll.ref)
-               return 0;
+       dev_priv->display.initial_watermarks(old_intel_state,
+                                            pipe_config);
+       intel_enable_pipe(intel_crtc);
  
-       switch (cdclk) {
-       default:
-               MISSING_CASE(cdclk);
-       case 144000:
-       case 288000:
-       case 384000:
-       case 576000:
-               ratio = 60;
-               break;
-       case 624000:
-               ratio = 65;
-               break;
-       }
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
  
-       return dev_priv->cdclk_pll.ref * ratio;
+       intel_encoders_enable(crtc, pipe_config, old_state);
  }
  
- static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
  {
-       int ratio;
-       if (cdclk == dev_priv->cdclk_pll.ref)
-               return 0;
-       switch (cdclk) {
-       default:
-               MISSING_CASE(cdclk);
-       case  79200:
-       case 158400:
-       case 316800:
-               ratio = 33;
-               break;
-       }
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
  
-       return dev_priv->cdclk_pll.ref * ratio;
+       I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
+       I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
  }
  
- static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
+                            struct drm_atomic_state *old_state)
  {
-       I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
  
-       /* Timeout 200us */
-       if (intel_wait_for_register(dev_priv,
-                                   BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
-                                   1))
-               DRM_ERROR("timeout waiting for DE PLL unlock\n");
+       if (WARN_ON(intel_crtc->active))
+               return;
  
-       dev_priv->cdclk_pll.vco = 0;
- }
+       i9xx_set_pll_dividers(intel_crtc);
  
- static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
- {
-       int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
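-       /* e.g. vco 1248000 kHz / ref 19200 kHz -> ratio 65 */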
-       u32 val;
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
+               intel_dp_set_m_n(intel_crtc, M1_N1);
  
-       val = I915_READ(BXT_DE_PLL_CTL);
-       val &= ~BXT_DE_PLL_RATIO_MASK;
-       val |= BXT_DE_PLL_RATIO(ratio);
-       I915_WRITE(BXT_DE_PLL_CTL, val);
+       intel_set_pipe_timings(intel_crtc);
+       intel_set_pipe_src_size(intel_crtc);
  
-       I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+       i9xx_set_pipeconf(intel_crtc);
  
-       /* Timeout 200us */
-       if (intel_wait_for_register(dev_priv,
-                                   BXT_DE_PLL_ENABLE,
-                                   BXT_DE_PLL_LOCK,
-                                   BXT_DE_PLL_LOCK,
-                                   1))
-               DRM_ERROR("timeout waiting for DE PLL lock\n");
+       intel_crtc->active = true;
  
-       dev_priv->cdclk_pll.vco = vco;
- }
+       if (!IS_GEN2(dev_priv))
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  
- static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
- {
-       u32 val, divider;
-       int vco, ret;
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
  
-       if (IS_GEMINILAKE(dev_priv))
-               vco = glk_de_pll_vco(dev_priv, cdclk);
-       else
-               vco = bxt_de_pll_vco(dev_priv, cdclk);
+       i9xx_enable_pll(intel_crtc);
  
-       DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
+       i9xx_pfit_enable(intel_crtc);
  
-       /* cdclk = vco / 2 / div{1,1.5,2,4} */
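-       /* e.g. vco 1152000, cdclk 288000 -> ratio 4 -> cd2x divider 2 (1152000 / 2 / 2) */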
-       switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
-       case 8:
-               divider = BXT_CDCLK_CD2X_DIV_SEL_4;
-               break;
-       case 4:
-               divider = BXT_CDCLK_CD2X_DIV_SEL_2;
-               break;
-       case 3:
-               WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
-               divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
-               break;
-       case 2:
-               divider = BXT_CDCLK_CD2X_DIV_SEL_1;
-               break;
-       default:
-               WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
-               WARN_ON(vco != 0);
+       intel_color_load_luts(&pipe_config->base);
  
-               divider = BXT_CDCLK_CD2X_DIV_SEL_1;
-               break;
-       }
+       intel_update_watermarks(intel_crtc);
+       intel_enable_pipe(intel_crtc);
  
-       /* Inform power controller of upcoming frequency change */
-       mutex_lock(&dev_priv->rps.hw_lock);
-       ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-                                     0x80000000);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
  
-       if (ret) {
-               DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
-                         ret, cdclk);
-               return;
-       }
+       intel_encoders_enable(crtc, pipe_config, old_state);
+ }
  
-       if (dev_priv->cdclk_pll.vco != 0 &&
-           dev_priv->cdclk_pll.vco != vco)
-               bxt_de_pll_disable(dev_priv);
-       if (dev_priv->cdclk_pll.vco != vco)
-               bxt_de_pll_enable(dev_priv, vco);
-       val = divider | skl_cdclk_decimal(cdclk);
-       /*
-        * FIXME if only the cd2x divider needs changing, it could be done
-        * without shutting off the pipe (if only one pipe is active).
-        */
-       val |= BXT_CDCLK_CD2X_PIPE_NONE;
-       /*
-        * Disable SSA Precharge when CD clock frequency < 500 MHz,
-        * enable otherwise.
-        */
-       if (cdclk >= 500000)
-               val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-       I915_WRITE(CDCLK_CTL, val);
-       mutex_lock(&dev_priv->rps.hw_lock);
-       ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-                                     DIV_ROUND_UP(cdclk, 25000));
-       mutex_unlock(&dev_priv->rps.hw_lock);
-       if (ret) {
-               DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
-                         ret, cdclk);
-               return;
-       }
-       intel_update_cdclk(dev_priv);
- }
- 
- static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
- {
-       u32 cdctl, expected;
-       intel_update_cdclk(dev_priv);
-       if (dev_priv->cdclk_pll.vco == 0 ||
-           dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
-               goto sanitize;
-       /* DPLL okay; verify the cdclock
-        *
-        * Some BIOS versions leave an incorrect decimal frequency value and
-        * set reserved MBZ bits in CDCLK_CTL, at least when exiting from S4,
-        * so sanitize this register.
-        */
-       cdctl = I915_READ(CDCLK_CTL);
-       /*
-        * Let's ignore the pipe field, since BIOS could have configured the
-        * dividers both synching to an active pipe, or asynchronously
-        * (PIPE_NONE).
-        */
-       cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-       expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
-                  skl_cdclk_decimal(dev_priv->cdclk_freq);
-       /*
-        * Disable SSA Precharge when CD clock frequency < 500 MHz,
-        * enable otherwise.
-        */
-       if (dev_priv->cdclk_freq >= 500000)
-               expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-       if (cdctl == expected)
-               /* All well; nothing to sanitize */
-               return;
- sanitize:
-       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-       /* force cdclk programming */
-       dev_priv->cdclk_freq = 0;
-       /* force full PLL disable + enable */
-       dev_priv->cdclk_pll.vco = -1;
- }
- 
- void bxt_init_cdclk(struct drm_i915_private *dev_priv)
- {
-       int cdclk;
-       bxt_sanitize_cdclk(dev_priv);
-       if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
-               return;
-       /*
-        * FIXME:
-        * - The initial CDCLK needs to be read from VBT.
-        *   Need to make this change after VBT has changes for BXT.
-        */
-       if (IS_GEMINILAKE(dev_priv))
-               cdclk = glk_calc_cdclk(0);
-       else
-               cdclk = bxt_calc_cdclk(0);
-       bxt_set_cdclk(dev_priv, cdclk);
- }
- 
- void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
- {
-       bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
- }
- 
- static int skl_calc_cdclk(int max_pixclk, int vco)
- {
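-       /* cdclk = vco / 2 / div, e.g. 617143 = 8640000 / 2 / 7, 337500 = 8100000 / 2 / 12 */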
-       if (vco == 8640000) {
-               if (max_pixclk > 540000)
-                       return 617143;
-               else if (max_pixclk > 432000)
-                       return 540000;
-               else if (max_pixclk > 308571)
-                       return 432000;
-               else
-                       return 308571;
-       } else {
-               if (max_pixclk > 540000)
-                       return 675000;
-               else if (max_pixclk > 450000)
-                       return 540000;
-               else if (max_pixclk > 337500)
-                       return 450000;
-               else
-                       return 337500;
-       }
- }
- 
- static void
- skl_dpll0_update(struct drm_i915_private *dev_priv)
- {
-       u32 val;
-       dev_priv->cdclk_pll.ref = 24000;
-       dev_priv->cdclk_pll.vco = 0;
-       val = I915_READ(LCPLL1_CTL);
-       if ((val & LCPLL_PLL_ENABLE) == 0)
-               return;
-       if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
-               return;
-       val = I915_READ(DPLL_CTRL1);
-       if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
-                           DPLL_CTRL1_SSC(SKL_DPLL0) |
-                           DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
-                   DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
-               return;
-       switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
-               dev_priv->cdclk_pll.vco = 8100000;
-               break;
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
-               dev_priv->cdclk_pll.vco = 8640000;
-               break;
-       default:
-               MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
-               break;
-       }
- }
- 
- void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
- {
-       bool changed = dev_priv->skl_preferred_vco_freq != vco;
-       dev_priv->skl_preferred_vco_freq = vco;
-       if (changed)
-               intel_update_max_cdclk(dev_priv);
- }
- 
- static void
- skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
- {
-       int min_cdclk = skl_calc_cdclk(0, vco);
-       u32 val;
-       WARN_ON(vco != 8100000 && vco != 8640000);
-       /* select the minimum CDCLK before enabling DPLL 0 */
-       val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
-       I915_WRITE(CDCLK_CTL, val);
-       POSTING_READ(CDCLK_CTL);
-       /*
-        * We always enable DPLL0 with the lowest link rate possible, but still
-        * taking into account the VCO required to operate the eDP panel at the
-        * desired frequency. The usual DP link rates operate with a VCO of
-        * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
-        * The modeset code is responsible for the selection of the exact link
-        * rate later on, with the constraint of choosing a frequency that
-        * works with vco.
-        */
-       val = I915_READ(DPLL_CTRL1);
-       val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
-                DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
-       val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-       if (vco == 8640000)
-               val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
-                                           SKL_DPLL0);
-       else
-               val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
-                                           SKL_DPLL0);
-       I915_WRITE(DPLL_CTRL1, val);
-       POSTING_READ(DPLL_CTRL1);
-       I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
-       if (intel_wait_for_register(dev_priv,
-                                   LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
-                                   5))
-               DRM_ERROR("DPLL0 not locked\n");
-       dev_priv->cdclk_pll.vco = vco;
-       /* We'll want to keep using the current vco from now on. */
-       skl_set_preferred_cdclk_vco(dev_priv, vco);
- }
- 
- static void
- skl_dpll0_disable(struct drm_i915_private *dev_priv)
- {
-       I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
-       if (intel_wait_for_register(dev_priv,
-                                  LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
-                                  1))
-               DRM_ERROR("Couldn't disable DPLL0\n");
-       dev_priv->cdclk_pll.vco = 0;
- }
- 
- static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
- {
-       u32 freq_select, pcu_ack;
-       int ret;
-       WARN_ON((cdclk == 24000) != (vco == 0));
-       DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
-       mutex_lock(&dev_priv->rps.hw_lock);
-       ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-                               SKL_CDCLK_PREPARE_FOR_CHANGE,
-                               SKL_CDCLK_READY_FOR_CHANGE,
-                               SKL_CDCLK_READY_FOR_CHANGE, 3);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-       if (ret) {
-               DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
-                         ret);
-               return;
-       }
-       /* set CDCLK_CTL */
-       switch (cdclk) {
-       case 450000:
-       case 432000:
-               freq_select = CDCLK_FREQ_450_432;
-               pcu_ack = 1;
-               break;
-       case 540000:
-               freq_select = CDCLK_FREQ_540;
-               pcu_ack = 2;
-               break;
-       case 308571:
-       case 337500:
-       default:
-               freq_select = CDCLK_FREQ_337_308;
-               pcu_ack = 0;
-               break;
-       case 617143:
-       case 675000:
-               freq_select = CDCLK_FREQ_675_617;
-               pcu_ack = 3;
-               break;
-       }
-       if (dev_priv->cdclk_pll.vco != 0 &&
-           dev_priv->cdclk_pll.vco != vco)
-               skl_dpll0_disable(dev_priv);
-       if (dev_priv->cdclk_pll.vco != vco)
-               skl_dpll0_enable(dev_priv, vco);
-       I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
-       POSTING_READ(CDCLK_CTL);
-       /* inform PCU of the change */
-       mutex_lock(&dev_priv->rps.hw_lock);
-       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-       intel_update_cdclk(dev_priv);
- }
- 
- static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
- 
- void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
- {
-       skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
- }
- 
- void skl_init_cdclk(struct drm_i915_private *dev_priv)
- {
-       int cdclk, vco;
-       skl_sanitize_cdclk(dev_priv);
-       if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
-               /*
-                * Use the current vco as our initial
-                * guess as to what the preferred vco is.
-                */
-               if (dev_priv->skl_preferred_vco_freq == 0)
-                       skl_set_preferred_cdclk_vco(dev_priv,
-                                                   dev_priv->cdclk_pll.vco);
-               return;
-       }
-       vco = dev_priv->skl_preferred_vco_freq;
-       if (vco == 0)
-               vco = 8100000;
-       cdclk = skl_calc_cdclk(0, vco);
-       skl_set_cdclk(dev_priv, cdclk, vco);
- }
- 
- static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
- {
-       uint32_t cdctl, expected;
-       /*
-        * Check if the pre-os initialized the display.
-        * The pre-os sets the SWF18 scratchpad register, which OS drivers
-        * can read to check that status.
-        */
-       if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
-               goto sanitize;
-       intel_update_cdclk(dev_priv);
-       /* Is PLL enabled and locked ? */
-       if (dev_priv->cdclk_pll.vco == 0 ||
-           dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
-               goto sanitize;
-       /* DPLL okay; verify the cdclock
-        *
-        * In some instances the BIOS programs the frequency selection
-        * correctly but gets the decimal part wrong when the pre-os does
-        * not enable the display. Verify that as well.
-        */
-       cdctl = I915_READ(CDCLK_CTL);
-       expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
-               skl_cdclk_decimal(dev_priv->cdclk_freq);
-       if (cdctl == expected)
-               /* All well; nothing to sanitize */
-               return;
- sanitize:
-       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-       /* force cdclk programming */
-       dev_priv->cdclk_freq = 0;
-       /* force full PLL disable + enable */
-       dev_priv->cdclk_pll.vco = -1;
- }
- 
- /* Adjust CDclk dividers to allow high res or save power if possible */
- static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
- {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 val, cmd;
-       WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
-                                       != dev_priv->cdclk_freq);
-       if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
-               cmd = 2;
-       else if (cdclk == 266667)
-               cmd = 1;
-       else
-               cmd = 0;
-       mutex_lock(&dev_priv->rps.hw_lock);
-       val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
-       val &= ~DSPFREQGUAR_MASK;
-       val |= (cmd << DSPFREQGUAR_SHIFT);
-       vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
-       if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
-                     DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
-                    50)) {
-               DRM_ERROR("timed out waiting for CDclk change\n");
-       }
-       mutex_unlock(&dev_priv->rps.hw_lock);
-       mutex_lock(&dev_priv->sb_lock);
-       if (cdclk == 400000) {
-               u32 divider;
-               divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
-               /* adjust cdclk divider */
-               val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
-               val &= ~CCK_FREQUENCY_VALUES;
-               val |= divider;
-               vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
-               if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
-                             CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
-                            50))
-                       DRM_ERROR("timed out waiting for CDclk change\n");
-       }
-       /* adjust self-refresh exit latency value */
-       val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
-       val &= ~0x7f;
-       /*
-        * For high bandwidth configs, we set a higher latency in the bunit
-        * so that the core display fetch happens in time to avoid underruns.
-        */
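-       /* the latency field is in 250 ns units, e.g. 4500 / 250 = 18 */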
-       if (cdclk == 400000)
-               val |= 4500 / 250; /* 4.5 usec */
-       else
-               val |= 3000 / 250; /* 3.0 usec */
-       vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
-       mutex_unlock(&dev_priv->sb_lock);
-       intel_update_cdclk(dev_priv);
- }
- 
- static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
- {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 val, cmd;
-       WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
-                                               != dev_priv->cdclk_freq);
-       switch (cdclk) {
-       case 333333:
-       case 320000:
-       case 266667:
-       case 200000:
-               break;
-       default:
-               MISSING_CASE(cdclk);
-               return;
-       }
-       /*
-        * Specs are full of misinformation, but testing on actual
-        * hardware has shown that we just need to write the desired
-        * CCK divider into the Punit register.
-        */
-       cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
-       mutex_lock(&dev_priv->rps.hw_lock);
-       val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
-       val &= ~DSPFREQGUAR_MASK_CHV;
-       val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
-       vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
-       if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
-                     DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
-                    50)) {
-               DRM_ERROR("timed out waiting for CDclk change\n");
-       }
-       mutex_unlock(&dev_priv->rps.hw_lock);
-       intel_update_cdclk(dev_priv);
- }
- 
- static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
-                                int max_pixclk)
- {
-       int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
-       int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
-       /*
-        * Really only a few cases to deal with, as only 4 CDclks are supported:
-        *   200MHz
-        *   267MHz
-        *   320/333MHz (depends on HPLL freq)
-        *   400MHz (VLV only)
-        * So we check to see whether we're above 90% (VLV) or 95% (CHV)
-        * of the lower bin and adjust if needed.
-        *
-        * We seem to get an unstable or solid color picture at 200MHz.
-        * Not sure what's wrong. For now use 200MHz only when all pipes
-        * are off.
-        */
-       if (!IS_CHERRYVIEW(dev_priv) &&
-           max_pixclk > freq_320*limit/100)
-               return 400000;
-       else if (max_pixclk > 266667*limit/100)
-               return freq_320;
-       else if (max_pixclk > 0)
-               return 266667;
-       else
-               return 200000;
- }
- 
- static int glk_calc_cdclk(int max_pixclk)
- {
-       if (max_pixclk > 2 * 158400)
-               return 316800;
-       else if (max_pixclk > 2 * 79200)
-               return 158400;
-       else
-               return 79200;
- }
- 
- static int bxt_calc_cdclk(int max_pixclk)
- {
-       if (max_pixclk > 576000)
-               return 624000;
-       else if (max_pixclk > 384000)
-               return 576000;
-       else if (max_pixclk > 288000)
-               return 384000;
-       else if (max_pixclk > 144000)
-               return 288000;
-       else
-               return 144000;
- }
- 
- /* Compute the max pixel clock for new configuration. */
- static int intel_mode_max_pixclk(struct drm_device *dev,
-                                struct drm_atomic_state *state)
- {
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *crtc_state;
-       unsigned max_pixclk = 0, i;
-       enum pipe pipe;
-       memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
-              sizeof(intel_state->min_pixclk));
-       for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               int pixclk = 0;
-               if (crtc_state->enable)
-                       pixclk = crtc_state->adjusted_mode.crtc_clock;
-               intel_state->min_pixclk[i] = pixclk;
-       }
-       for_each_pipe(dev_priv, pipe)
-               max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
-       return max_pixclk;
- }
- 
- static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
- {
-       struct drm_device *dev = state->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int max_pixclk = intel_mode_max_pixclk(dev, state);
-       struct intel_atomic_state *intel_state =
-               to_intel_atomic_state(state);
-       intel_state->cdclk = intel_state->dev_cdclk =
-               valleyview_calc_cdclk(dev_priv, max_pixclk);
-       if (!intel_state->active_crtcs)
-               intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
-       return 0;
- }
- 
- static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
- {
-       struct drm_i915_private *dev_priv = to_i915(state->dev);
-       int max_pixclk = ilk_max_pixel_rate(state);
-       struct intel_atomic_state *intel_state =
-               to_intel_atomic_state(state);
-       int cdclk;
-       if (IS_GEMINILAKE(dev_priv))
-               cdclk = glk_calc_cdclk(max_pixclk);
-       else
-               cdclk = bxt_calc_cdclk(max_pixclk);
-       intel_state->cdclk = intel_state->dev_cdclk = cdclk;
-       if (!intel_state->active_crtcs) {
-               if (IS_GEMINILAKE(dev_priv))
-                       cdclk = glk_calc_cdclk(0);
-               else
-                       cdclk = bxt_calc_cdclk(0);
-               intel_state->dev_cdclk = cdclk;
-       }
-       return 0;
- }
- 
- static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
- {
-       unsigned int credits, default_credits;
-       if (IS_CHERRYVIEW(dev_priv))
-               default_credits = PFI_CREDIT(12);
-       else
-               default_credits = PFI_CREDIT(8);
-       if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
-               /* CHV suggested value is 31 or 63 */
-               if (IS_CHERRYVIEW(dev_priv))
-                       credits = PFI_CREDIT_63;
-               else
-                       credits = PFI_CREDIT(15);
-       } else {
-               credits = default_credits;
-       }
-       /*
-        * WA - write default credits before re-programming
-        * FIXME: should we also set the resend bit here?
-        */
-       I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
-                  default_credits);
-       I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
-                  credits | PFI_CREDIT_RESEND);
-       /*
-        * FIXME is this guaranteed to clear
-        * immediately or should we poll for it?
-        */
-       WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
- }
- 
- static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
- {
-       struct drm_device *dev = old_state->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_atomic_state *old_intel_state =
-               to_intel_atomic_state(old_state);
-       unsigned req_cdclk = old_intel_state->dev_cdclk;
-       /*
-        * FIXME: We can end up here with all power domains off, yet
-        * with a CDCLK frequency other than the minimum. To account
-        * for this take the PIPE-A power domain, which covers the HW
-        * blocks needed for the following programming. This can be
-        * removed once it's guaranteed that we get here either with
-        * the minimum CDCLK set, or the required power domains
-        * enabled.
-        */
-       intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
-       if (IS_CHERRYVIEW(dev_priv))
-               cherryview_set_cdclk(dev, req_cdclk);
-       else
-               valleyview_set_cdclk(dev, req_cdclk);
-       vlv_program_pfi_credits(dev_priv);
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
- }
- 
- static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
-                                  struct drm_atomic_state *old_state)
- {
-       struct drm_crtc *crtc = pipe_config->base.crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-       if (WARN_ON(intel_crtc->active))
-               return;
-       if (intel_crtc_has_dp_encoder(intel_crtc->config))
-               intel_dp_set_m_n(intel_crtc, M1_N1);
-       intel_set_pipe_timings(intel_crtc);
-       intel_set_pipe_src_size(intel_crtc);
-       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
-               struct drm_i915_private *dev_priv = to_i915(dev);
-               I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
-               I915_WRITE(CHV_CANVAS(pipe), 0);
-       }
-       i9xx_set_pipeconf(intel_crtc);
-       intel_crtc->active = true;
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-       intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
-       if (IS_CHERRYVIEW(dev_priv)) {
-               chv_prepare_pll(intel_crtc, intel_crtc->config);
-               chv_enable_pll(intel_crtc, intel_crtc->config);
-       } else {
-               vlv_prepare_pll(intel_crtc, intel_crtc->config);
-               vlv_enable_pll(intel_crtc, intel_crtc->config);
-       }
-       intel_encoders_pre_enable(crtc, pipe_config, old_state);
-       i9xx_pfit_enable(intel_crtc);
-       intel_color_load_luts(&pipe_config->base);
-       intel_update_watermarks(intel_crtc);
-       intel_enable_pipe(intel_crtc);
-       assert_vblank_disabled(crtc);
-       drm_crtc_vblank_on(crtc);
-       intel_encoders_enable(crtc, pipe_config, old_state);
- }
- 
- static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
- {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
-       I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
- }
- 
- static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
-                            struct drm_atomic_state *old_state)
- {
-       struct drm_crtc *crtc = pipe_config->base.crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-       if (WARN_ON(intel_crtc->active))
-               return;
-       i9xx_set_pll_dividers(intel_crtc);
-       if (intel_crtc_has_dp_encoder(intel_crtc->config))
-               intel_dp_set_m_n(intel_crtc, M1_N1);
-       intel_set_pipe_timings(intel_crtc);
-       intel_set_pipe_src_size(intel_crtc);
-       i9xx_set_pipeconf(intel_crtc);
-       intel_crtc->active = true;
-       if (!IS_GEN2(dev_priv))
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-       intel_encoders_pre_enable(crtc, pipe_config, old_state);
-       i9xx_enable_pll(intel_crtc);
-       i9xx_pfit_enable(intel_crtc);
-       intel_color_load_luts(&pipe_config->base);
-       intel_update_watermarks(intel_crtc);
-       intel_enable_pipe(intel_crtc);
-       assert_vblank_disabled(crtc);
-       drm_crtc_vblank_on(crtc);
-       intel_encoders_enable(crtc, pipe_config, old_state);
- }
- 
- static void i9xx_pfit_disable(struct intel_crtc *crtc)
- {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+ static void i9xx_pfit_disable(struct intel_crtc *crtc)
+ {
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
  
        if (!crtc->config->gmch_pfit.control)
                return;
@@@ -6857,6 -5845,9 +5846,9 @@@ static void i9xx_crtc_disable(struct in
  
        if (!IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+       if (!dev_priv->display.initial_watermarks)
+               intel_update_watermarks(intel_crtc);
  }
  
  static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        enum intel_display_power_domain domain;
-       unsigned long domains;
+       u64 domains;
        struct drm_atomic_state *state;
        struct intel_crtc_state *crtc_state;
        int ret;
@@@ -7173,7 -6164,7 +6165,7 @@@ static bool pipe_config_supports_ips(st
         *
         * Should measure whether using a lower cdclk w/o IPS
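         * (e.g. with max_cdclk_freq 675000 kHz the cutoff is 641250 kHz)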
         */
-       return ilk_pipe_pixel_rate(pipe_config) <=
+       return pipe_config->pixel_rate <=
                dev_priv->max_cdclk_freq * 95 / 100;
  }
  
@@@ -7188,490 -6179,118 +6180,118 @@@ static void hsw_compute_ips_config(stru
                pipe_config_supports_ips(dev_priv, pipe_config);
  }
  
- static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
- {
-       const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       /* GDG double wide on either pipe, otherwise pipe A only */
-       return INTEL_INFO(dev_priv)->gen < 4 &&
-               (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
- }
- 
- static int intel_crtc_compute_config(struct intel_crtc *crtc,
-                                    struct intel_crtc_state *pipe_config)
- {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       int clock_limit = dev_priv->max_dotclk_freq;
-       if (INTEL_GEN(dev_priv) < 4) {
-               clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
-               /*
-                * Enable double wide mode when the dot clock
-                * is > 90% of the (display) core speed.
-                */
-               if (intel_crtc_supports_double_wide(crtc) &&
-                   adjusted_mode->crtc_clock > clock_limit) {
-                       clock_limit = dev_priv->max_dotclk_freq;
-                       pipe_config->double_wide = true;
-               }
-       }
-       if (adjusted_mode->crtc_clock > clock_limit) {
-               DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
-                             adjusted_mode->crtc_clock, clock_limit,
-                             yesno(pipe_config->double_wide));
-               return -EINVAL;
-       }
-       /*
-        * Pipe horizontal size must be even in:
-        * - DVO ganged mode
-        * - LVDS dual channel mode
-        * - Double wide pipe
-        */
-       if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
-            intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
-               pipe_config->pipe_src_w &= ~1;
-       /* Cantiga+ cannot handle modes with a hsync front porch of 0.
-        * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
-        */
-       if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
-               adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
-               return -EINVAL;
-       if (HAS_IPS(dev_priv))
-               hsw_compute_ips_config(crtc, pipe_config);
-       if (pipe_config->has_pch_encoder)
-               return ironlake_fdi_compute_config(crtc, pipe_config);
-       return 0;
- }
- 
- static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       u32 cdctl;
-       skl_dpll0_update(dev_priv);
-       if (dev_priv->cdclk_pll.vco == 0)
-               return dev_priv->cdclk_pll.ref;
-       cdctl = I915_READ(CDCLK_CTL);
-       if (dev_priv->cdclk_pll.vco == 8640000) {
-               switch (cdctl & CDCLK_FREQ_SEL_MASK) {
-               case CDCLK_FREQ_450_432:
-                       return 432000;
-               case CDCLK_FREQ_337_308:
-                       return 308571;
-               case CDCLK_FREQ_540:
-                       return 540000;
-               case CDCLK_FREQ_675_617:
-                       return 617143;
-               default:
-                       MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
-               }
-       } else {
-               switch (cdctl & CDCLK_FREQ_SEL_MASK) {
-               case CDCLK_FREQ_450_432:
-                       return 450000;
-               case CDCLK_FREQ_337_308:
-                       return 337500;
-               case CDCLK_FREQ_540:
-                       return 540000;
-               case CDCLK_FREQ_675_617:
-                       return 675000;
-               default:
-                       MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
-               }
-       }
-       return dev_priv->cdclk_pll.ref;
- }
- 
- static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
- {
-       u32 val;
-       dev_priv->cdclk_pll.ref = 19200;
-       dev_priv->cdclk_pll.vco = 0;
-       val = I915_READ(BXT_DE_PLL_ENABLE);
-       if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
-               return;
-       if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
-               return;
-       val = I915_READ(BXT_DE_PLL_CTL);
-       dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
-               dev_priv->cdclk_pll.ref;
- }
- 
- static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       u32 divider;
-       int div, vco;
-       bxt_de_pll_update(dev_priv);
-       vco = dev_priv->cdclk_pll.vco;
-       if (vco == 0)
-               return dev_priv->cdclk_pll.ref;
-       divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
-       switch (divider) {
-       case BXT_CDCLK_CD2X_DIV_SEL_1:
-               div = 2;
-               break;
-       case BXT_CDCLK_CD2X_DIV_SEL_1_5:
-               WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
-               div = 3;
-               break;
-       case BXT_CDCLK_CD2X_DIV_SEL_2:
-               div = 4;
-               break;
-       case BXT_CDCLK_CD2X_DIV_SEL_4:
-               div = 8;
-               break;
-       default:
-               MISSING_CASE(divider);
-               return dev_priv->cdclk_pll.ref;
-       }
-       return DIV_ROUND_CLOSEST(vco, div);
- }
- 
- static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       uint32_t lcpll = I915_READ(LCPLL_CTL);
-       uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
-       if (lcpll & LCPLL_CD_SOURCE_FCLK)
-               return 800000;
-       else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
-               return 450000;
-       else if (freq == LCPLL_CLK_FREQ_450)
-               return 450000;
-       else if (freq == LCPLL_CLK_FREQ_54O_BDW)
-               return 540000;
-       else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
-               return 337500;
-       else
-               return 675000;
- }
- 
- static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       uint32_t lcpll = I915_READ(LCPLL_CTL);
-       uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
-       if (lcpll & LCPLL_CD_SOURCE_FCLK)
-               return 800000;
-       else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
-               return 450000;
-       else if (freq == LCPLL_CLK_FREQ_450)
-               return 450000;
-       else if (IS_HSW_ULT(dev_priv))
-               return 337500;
-       else
-               return 540000;
- }
- 
- static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       return vlv_get_cck_clock_hpll(dev_priv, "cdclk",
-                                     CCK_DISPLAY_CLOCK_CONTROL);
- }
- 
- static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       return 450000;
- }
- 
- static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       return 400000;
- }
- 
- static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       return 333333;
- }
- 
- static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       return 200000;
- }
- 
- static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u16 gcfgc = 0;
-       pci_read_config_word(pdev, GCFGC, &gcfgc);
-       switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
-       case GC_DISPLAY_CLOCK_267_MHZ_PNV:
-               return 266667;
-       case GC_DISPLAY_CLOCK_333_MHZ_PNV:
-               return 333333;
-       case GC_DISPLAY_CLOCK_444_MHZ_PNV:
-               return 444444;
-       case GC_DISPLAY_CLOCK_200_MHZ_PNV:
-               return 200000;
-       default:
-               DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
-       case GC_DISPLAY_CLOCK_133_MHZ_PNV:
-               return 133333;
-       case GC_DISPLAY_CLOCK_167_MHZ_PNV:
-               return 166667;
-       }
- }
- 
- static int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u16 gcfgc = 0;
-       pci_read_config_word(pdev, GCFGC, &gcfgc);
-       if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
-               return 133333;
-       else {
-               switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
-               case GC_DISPLAY_CLOCK_333_MHZ:
-                       return 333333;
-               default:
-               case GC_DISPLAY_CLOCK_190_200_MHZ:
-                       return 190000;
-               }
-       }
- }
- 
- static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv)
+ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
  {
-       return 266667;
+       const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       /* GDG double wide on either pipe, otherwise pipe A only */
+       return INTEL_INFO(dev_priv)->gen < 4 &&
+               (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
  }
  
- static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv)
+ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
  {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u16 hpllcc = 0;
+       uint32_t pixel_rate;
+       pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
  
        /*
-        * 852GM/852GMV only supports 133 MHz and the HPLLCC
-        * encoding is different :(
-        * FIXME is this the right way to detect 852GM/852GMV?
+        * We only use IF-ID interlacing. If we ever use
+        * PF-ID we'll need to adjust the pixel_rate here.
         */
-       if (pdev->revision == 0x1)
-               return 133333;
  
-       pci_bus_read_config_word(pdev->bus,
-                                PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+       if (pipe_config->pch_pfit.enabled) {
+               uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+               uint32_t pfit_size = pipe_config->pch_pfit.size;
  
-       /* Assume that the hardware is in the high speed state.  This
-        * should be the default.
-        */
-       switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
-       case GC_CLOCK_133_200:
-       case GC_CLOCK_133_200_2:
-       case GC_CLOCK_100_200:
-               return 200000;
-       case GC_CLOCK_166_250:
-               return 250000;
-       case GC_CLOCK_100_133:
-               return 133333;
-       case GC_CLOCK_133_266:
-       case GC_CLOCK_133_266_2:
-       case GC_CLOCK_166_266:
-               return 266667;
-       }
-       /* Shouldn't happen */
-       return 0;
- }
- 
- static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       return 133333;
- }
+               pipe_w = pipe_config->pipe_src_w;
+               pipe_h = pipe_config->pipe_src_h;
  
- static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
- {
-       static const unsigned int blb_vco[8] = {
-               [0] = 3200000,
-               [1] = 4000000,
-               [2] = 5333333,
-               [3] = 4800000,
-               [4] = 6400000,
-       };
-       static const unsigned int pnv_vco[8] = {
-               [0] = 3200000,
-               [1] = 4000000,
-               [2] = 5333333,
-               [3] = 4800000,
-               [4] = 2666667,
-       };
-       static const unsigned int cl_vco[8] = {
-               [0] = 3200000,
-               [1] = 4000000,
-               [2] = 5333333,
-               [3] = 6400000,
-               [4] = 3333333,
-               [5] = 3566667,
-               [6] = 4266667,
-       };
-       static const unsigned int elk_vco[8] = {
-               [0] = 3200000,
-               [1] = 4000000,
-               [2] = 5333333,
-               [3] = 4800000,
-       };
-       static const unsigned int ctg_vco[8] = {
-               [0] = 3200000,
-               [1] = 4000000,
-               [2] = 5333333,
-               [3] = 6400000,
-               [4] = 2666667,
-               [5] = 4266667,
-       };
-       const unsigned int *vco_table;
-       unsigned int vco;
-       uint8_t tmp = 0;
-       /* FIXME other chipsets? */
-       if (IS_GM45(dev_priv))
-               vco_table = ctg_vco;
-       else if (IS_G4X(dev_priv))
-               vco_table = elk_vco;
-       else if (IS_I965GM(dev_priv))
-               vco_table = cl_vco;
-       else if (IS_PINEVIEW(dev_priv))
-               vco_table = pnv_vco;
-       else if (IS_G33(dev_priv))
-               vco_table = blb_vco;
-       else
-               return 0;
+               pfit_w = (pfit_size >> 16) & 0xFFFF;
+               pfit_h = pfit_size & 0xFFFF;
+               if (pipe_w < pfit_w)
+                       pipe_w = pfit_w;
+               if (pipe_h < pfit_h)
+                       pipe_h = pfit_h;
  
-       tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
+               if (WARN_ON(!pfit_w || !pfit_h))
+                       return pixel_rate;
  
-       vco = vco_table[tmp & 0x7];
-       if (vco == 0)
-               DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
-       else
-               DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+               pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
+                                    pfit_w * pfit_h);
+       }
  
-       return vco;
+       return pixel_rate;
  }
  
- static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv)
+ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
  {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
-       uint16_t tmp = 0;
-       pci_read_config_word(pdev, GCFGC, &tmp);
-       cdclk_sel = (tmp >> 12) & 0x1;
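-       /* e.g. vco 3200000: sel 1 -> 320000 (vco/10), sel 0 -> 228571 (vco/14) */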
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
  
-       switch (vco) {
-       case 2666667:
-       case 4000000:
-       case 5333333:
-               return cdclk_sel ? 333333 : 222222;
-       case 3200000:
-               return cdclk_sel ? 320000 : 228571;
-       default:
-               DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
-               return 222222;
-       }
+       if (HAS_GMCH_DISPLAY(dev_priv))
+               /* FIXME calculate proper pipe pixel rate for GMCH pfit */
+               crtc_state->pixel_rate =
+                       crtc_state->base.adjusted_mode.crtc_clock;
+       else
+               crtc_state->pixel_rate =
+                       ilk_pipe_pixel_rate(crtc_state);
  }
  
- static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
+ static int intel_crtc_compute_config(struct intel_crtc *crtc,
+                                    struct intel_crtc_state *pipe_config)
  {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       static const uint8_t div_3200[] = { 16, 10,  8 };
-       static const uint8_t div_4000[] = { 20, 12, 10 };
-       static const uint8_t div_5333[] = { 24, 16, 14 };
-       const uint8_t *div_table;
-       unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
-       uint16_t tmp = 0;
-       pci_read_config_word(pdev, GCFGC, &tmp);
-       cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
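-       /* e.g. vco 3200000, cdclk_sel 0 -> div_3200[0] = 16 -> 3200000 / 16 = 200000 */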
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+       int clock_limit = dev_priv->max_dotclk_freq;
  
-       if (cdclk_sel >= ARRAY_SIZE(div_3200))
-               goto fail;
+       if (INTEL_GEN(dev_priv) < 4) {
+               clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
  
-       switch (vco) {
-       case 3200000:
-               div_table = div_3200;
-               break;
-       case 4000000:
-               div_table = div_4000;
-               break;
-       case 5333333:
-               div_table = div_5333;
-               break;
-       default:
-               goto fail;
+               /*
+                * Enable double wide mode when the dot clock
+                * is > 90% of the (display) core speed.
+                */
+               if (intel_crtc_supports_double_wide(crtc) &&
+                   adjusted_mode->crtc_clock > clock_limit) {
+                       clock_limit = dev_priv->max_dotclk_freq;
+                       pipe_config->double_wide = true;
+               }
        }
  
-       return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
- fail:
-       DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
-       return 200000;
- }
- 
- static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv)
- {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
-       static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
-       static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
-       static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
-       const uint8_t *div_table;
-       unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
-       uint16_t tmp = 0;
+       if (adjusted_mode->crtc_clock > clock_limit) {
+               DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+                             adjusted_mode->crtc_clock, clock_limit,
+                             yesno(pipe_config->double_wide));
+               return -EINVAL;
+       }
  
-       pci_read_config_word(pdev, GCFGC, &tmp);
+       /*
+        * Pipe horizontal size must be even in:
+        * - DVO ganged mode
+        * - LVDS dual channel mode
+        * - Double wide pipe
+        */
+       if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
+            intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
+               pipe_config->pipe_src_w &= ~1;
  
-       cdclk_sel = (tmp >> 4) & 0x7;
+       /* Cantiga+ cannot handle modes with a hsync front porch of 0.
+        * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
+        */
+       if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+               adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
+               return -EINVAL;
  
-       if (cdclk_sel >= ARRAY_SIZE(div_3200))
-               goto fail;
+       intel_crtc_compute_pixel_rate(pipe_config);
  
-       switch (vco) {
-       case 3200000:
-               div_table = div_3200;
-               break;
-       case 4000000:
-               div_table = div_4000;
-               break;
-       case 4800000:
-               div_table = div_4800;
-               break;
-       case 5333333:
-               div_table = div_5333;
-               break;
-       default:
-               goto fail;
-       }
+       if (HAS_IPS(dev_priv))
+               hsw_compute_ips_config(crtc, pipe_config);
  
-       return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+       if (pipe_config->has_pch_encoder)
+               return ironlake_fdi_compute_config(crtc, pipe_config);
  
- fail:
-       DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
-       return 190476;
+       return 0;
  }
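
The double-wide decision in intel_crtc_compute_config() above reduces to simple
integer arithmetic. A minimal standalone C sketch of that check; all clock
values here are invented, and only the 9/10 factor and the shape of the
comparison come from the hunk above:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical clocks in kHz, not taken from any real platform. */
	int max_dotclk = 355000;
	int max_cdclk = 333000;
	int crtc_clock = 320000;
	bool double_wide = false;

	/* Gen2/3: a single-wide pipe tops out at 90% of the core clock. */
	int clock_limit = max_cdclk * 9 / 10;	/* 299700 kHz */

	if (crtc_clock > clock_limit) {
		/* Double wide lifts the limit back to the max dot clock. */
		clock_limit = max_dotclk;
		double_wide = true;
	}

	printf("limit=%d kHz, double_wide=%d\n", clock_limit, double_wide);
	return crtc_clock > clock_limit;	/* non-zero: mode rejected */
}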
  
  static void
@@@ -8768,7 -7387,8 +7388,8 @@@ i9xx_get_initial_plane_config(struct in
        val = I915_READ(DSPSTRIDE(pipe));
        fb->pitches[0] = val & 0xffffffc0;
  
-       aligned_height = intel_fb_align_height(dev, fb->height,
+       aligned_height = intel_fb_align_height(dev_priv,
+                                              fb->height,
                                               fb->format->format,
                                               fb->modifier);
  
@@@ -9809,7 -8429,8 +8430,8 @@@ skylake_get_initial_plane_config(struc
                                                fb->format->format);
        fb->pitches[0] = (val & 0x3ff) * stride_mult;
  
-       aligned_height = intel_fb_align_height(dev, fb->height,
+       aligned_height = intel_fb_align_height(dev_priv,
+                                              fb->height,
                                               fb->format->format,
                                               fb->modifier);
  
@@@ -9907,7 -8528,8 +8529,8 @@@ ironlake_get_initial_plane_config(struc
        val = I915_READ(DSPSTRIDE(pipe));
        fb->pitches[0] = val & 0xffffffc0;
  
-       aligned_height = intel_fb_align_height(dev, fb->height,
+       aligned_height = intel_fb_align_height(dev_priv,
+                                              fb->height,
                                               fb->format->format,
                                               fb->modifier);
  
@@@ -10166,312 -8788,73 +8789,73 @@@ static void hsw_restore_lcpll(struct dr
                                    5))
                DRM_ERROR("LCPLL not locked yet\n");
  
-       if (val & LCPLL_CD_SOURCE_FCLK) {
-               val = I915_READ(LCPLL_CTL);
-               val &= ~LCPLL_CD_SOURCE_FCLK;
-               I915_WRITE(LCPLL_CTL, val);
-               if (wait_for_us((I915_READ(LCPLL_CTL) &
-                                LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
-                       DRM_ERROR("Switching back to LCPLL failed\n");
-       }
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-       intel_update_cdclk(dev_priv);
- }
- /*
-  * Package states C8 and deeper are really deep PC states that can only be
-  * reached when all the devices on the system allow it, so even if the graphics
-  * device allows PC8+, it doesn't mean the system will actually get to these
-  * states. Our driver only allows PC8+ when going into runtime PM.
-  *
-  * The requirements for PC8+ are that all the outputs are disabled, the power
-  * well is disabled and most interrupts are disabled, and these are also
-  * requirements for runtime PM. When these conditions are met, we manually do
-  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
-  * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
-  * hang the machine.
-  *
-  * When we really reach PC8 or deeper states (not just when we allow it) we lose
-  * the state of some registers, so when we come back from PC8+ we need to
-  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
-  * need to take care of the registers kept by RC6. Notice that this happens even
-  * if we don't put the device in PCI D3 state (which is what currently happens
-  * because of the runtime PM support).
-  *
-  * For more, read "Display Sequences for Package C8" in the hardware
-  * documentation.
-  */
- void hsw_enable_pc8(struct drm_i915_private *dev_priv)
- {
-       uint32_t val;
-       DRM_DEBUG_KMS("Enabling package C8+\n");
-       if (HAS_PCH_LPT_LP(dev_priv)) {
-               val = I915_READ(SOUTH_DSPCLK_GATE_D);
-               val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
-               I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
-       }
-       lpt_disable_clkout_dp(dev_priv);
-       hsw_disable_lcpll(dev_priv, true, true);
- }
- void hsw_disable_pc8(struct drm_i915_private *dev_priv)
- {
-       uint32_t val;
-       DRM_DEBUG_KMS("Disabling package C8+\n");
-       hsw_restore_lcpll(dev_priv);
-       lpt_init_pch_refclk(dev_priv);
-       if (HAS_PCH_LPT_LP(dev_priv)) {
-               val = I915_READ(SOUTH_DSPCLK_GATE_D);
-               val |= PCH_LP_PARTITION_LEVEL_DISABLE;
-               I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
-       }
- }
- static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
- {
-       struct drm_device *dev = old_state->dev;
-       struct intel_atomic_state *old_intel_state =
-               to_intel_atomic_state(old_state);
-       unsigned int req_cdclk = old_intel_state->dev_cdclk;
-       bxt_set_cdclk(to_i915(dev), req_cdclk);
- }
- static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
-                                         int pixel_rate)
- {
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-       if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
-               pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
-       /* BSpec says "Do not use DisplayPort with CDCLK less than
-        * 432 MHz, audio enabled, port width x4, and link rate
-        * HBR2 (5.4 GHz), or else there may be audio corruption or
-        * screen corruption."
-        */
-       if (intel_crtc_has_dp_encoder(crtc_state) &&
-           crtc_state->has_audio &&
-           crtc_state->port_clock >= 540000 &&
-           crtc_state->lane_count == 4)
-               pixel_rate = max(432000, pixel_rate);
-       return pixel_rate;
- }
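
Both adjustments in the removed helper above are easy to check numerically. A
standalone sketch; the starting pixel rate is invented, and only the 100/95
scaling and the 432000 kHz floor come from the code:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int pixel_rate = 400000;	/* hypothetical pipe pixel rate, kHz */

	/* IPS on BDW: cdclk must stay at or above pixel_rate / 0.95. */
	pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);	/* -> 421053 */

	/* DP + audio + 4 lanes + HBR2: never report less than 432 MHz. */
	if (pixel_rate < 432000)
		pixel_rate = 432000;

	printf("adjusted minimum: %d kHz\n", pixel_rate);	/* 432000 */
	return 0;
}
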
- /* compute the max rate for the new configuration */
- static int ilk_max_pixel_rate(struct drm_atomic_state *state)
- {
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = to_i915(state->dev);
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *cstate;
-       struct intel_crtc_state *crtc_state;
-       unsigned max_pixel_rate = 0, i;
-       enum pipe pipe;
-       memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
-              sizeof(intel_state->min_pixclk));
-       for_each_crtc_in_state(state, crtc, cstate, i) {
-               int pixel_rate;
-               crtc_state = to_intel_crtc_state(cstate);
-               if (!crtc_state->base.enable) {
-                       intel_state->min_pixclk[i] = 0;
-                       continue;
-               }
-               pixel_rate = ilk_pipe_pixel_rate(crtc_state);
-               if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
-                       pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
-                                                                   pixel_rate);
-               intel_state->min_pixclk[i] = pixel_rate;
-       }
-       for_each_pipe(dev_priv, pipe)
-               max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
-       return max_pixel_rate;
- }
- static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
- {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t val, data;
-       int ret;
-       if (WARN((I915_READ(LCPLL_CTL) &
-                 (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
-                  LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
-                  LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
-                  LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
-                "trying to change cdclk frequency with cdclk not enabled\n"))
-               return;
-       mutex_lock(&dev_priv->rps.hw_lock);
-       ret = sandybridge_pcode_write(dev_priv,
-                                     BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-       if (ret) {
-               DRM_ERROR("failed to inform pcode about cdclk change\n");
-               return;
-       }
-       val = I915_READ(LCPLL_CTL);
-       val |= LCPLL_CD_SOURCE_FCLK;
-       I915_WRITE(LCPLL_CTL, val);
-       if (wait_for_us(I915_READ(LCPLL_CTL) &
-                       LCPLL_CD_SOURCE_FCLK_DONE, 1))
-               DRM_ERROR("Switching to FCLK failed\n");
-       val = I915_READ(LCPLL_CTL);
-       val &= ~LCPLL_CLK_FREQ_MASK;
-       switch (cdclk) {
-       case 450000:
-               val |= LCPLL_CLK_FREQ_450;
-               data = 0;
-               break;
-       case 540000:
-               val |= LCPLL_CLK_FREQ_54O_BDW;
-               data = 1;
-               break;
-       case 337500:
-               val |= LCPLL_CLK_FREQ_337_5_BDW;
-               data = 2;
-               break;
-       case 675000:
-               val |= LCPLL_CLK_FREQ_675_BDW;
-               data = 3;
-               break;
-       default:
-               WARN(1, "invalid cdclk frequency\n");
-               return;
-       }
-       I915_WRITE(LCPLL_CTL, val);
-       val = I915_READ(LCPLL_CTL);
-       val &= ~LCPLL_CD_SOURCE_FCLK;
-       I915_WRITE(LCPLL_CTL, val);
-       if (wait_for_us((I915_READ(LCPLL_CTL) &
-                       LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
-               DRM_ERROR("Switching back to LCPLL failed\n");
-       mutex_lock(&dev_priv->rps.hw_lock);
-       sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-       I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
-       intel_update_cdclk(dev_priv);
-       WARN(cdclk != dev_priv->cdclk_freq,
-            "cdclk requested %d kHz but got %d kHz\n",
-            cdclk, dev_priv->cdclk_freq);
- }
- static int broadwell_calc_cdclk(int max_pixclk)
- {
-       if (max_pixclk > 540000)
-               return 675000;
-       else if (max_pixclk > 450000)
-               return 540000;
-       else if (max_pixclk > 337500)
-               return 450000;
-       else
-               return 337500;
- }
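
The ladder in broadwell_calc_cdclk() above always rounds a pixel rate up to the
next legal BDW cdclk step. A quick standalone check of the thresholds; the
sample rates are arbitrary:

#include <stdio.h>

/* Same ladder as the removed broadwell_calc_cdclk(), in kHz. */
static int calc_cdclk(int max_pixclk)
{
	if (max_pixclk > 540000)
		return 675000;
	else if (max_pixclk > 450000)
		return 540000;
	else if (max_pixclk > 337500)
		return 450000;
	else
		return 337500;
}

int main(void)
{
	const int samples[] = { 0, 337500, 337501, 460000, 540001 };
	int i;

	for (i = 0; i < 5; i++)
		printf("%6d kHz -> cdclk %d kHz\n",
		       samples[i], calc_cdclk(samples[i]));
	/* 0 and 337500 stay at 337500; 337501 -> 450000;
	 * 460000 -> 540000; 540001 -> 675000. */
	return 0;
}
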
- static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
- {
-       struct drm_i915_private *dev_priv = to_i915(state->dev);
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       int max_pixclk = ilk_max_pixel_rate(state);
-       int cdclk;
-       /*
-        * FIXME should also account for plane ratio
-        * once 64bpp pixel formats are supported.
-        */
-       cdclk = broadwell_calc_cdclk(max_pixclk);
-       if (cdclk > dev_priv->max_cdclk_freq) {
-               DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
-                             cdclk, dev_priv->max_cdclk_freq);
-               return -EINVAL;
-       }
-       intel_state->cdclk = intel_state->dev_cdclk = cdclk;
-       if (!intel_state->active_crtcs)
-               intel_state->dev_cdclk = broadwell_calc_cdclk(0);
-       return 0;
- }
+       if (val & LCPLL_CD_SOURCE_FCLK) {
+               val = I915_READ(LCPLL_CTL);
+               val &= ~LCPLL_CD_SOURCE_FCLK;
+               I915_WRITE(LCPLL_CTL, val);
  
- static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
- {
-       struct drm_device *dev = old_state->dev;
-       struct intel_atomic_state *old_intel_state =
-               to_intel_atomic_state(old_state);
-       unsigned req_cdclk = old_intel_state->dev_cdclk;
+               if (wait_for_us((I915_READ(LCPLL_CTL) &
+                                LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+                       DRM_ERROR("Switching back to LCPLL failed\n");
+       }
  
-       broadwell_set_cdclk(dev, req_cdclk);
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       intel_update_cdclk(dev_priv);
  }
  
- static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+ /*
+  * Package states C8 and deeper are really deep PC states that can only be
+  * reached when all the devices on the system allow it, so even if the graphics
+  * device allows PC8+, it doesn't mean the system will actually get to these
+  * states. Our driver only allows PC8+ when going into runtime PM.
+  *
+  * The requirements for PC8+ are that all the outputs are disabled, the power
+  * well is disabled and most interrupts are disabled, and these are also
+  * requirements for runtime PM. When these conditions are met, we manually do
+  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
+  * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
+  * hang the machine.
+  *
+  * When we really reach PC8 or deeper states (not just when we allow it) we lose
+  * the state of some registers, so when we come back from PC8+ we need to
+  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+  * need to take care of the registers kept by RC6. Notice that this happens even
+  * if we don't put the device in PCI D3 state (which is what currently happens
+  * because of the runtime PM support).
+  *
+  * For more, read "Display Sequences for Package C8" in the hardware
+  * documentation.
+  */
+ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
  {
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = to_i915(state->dev);
-       const int max_pixclk = ilk_max_pixel_rate(state);
-       int vco = intel_state->cdclk_pll_vco;
-       int cdclk;
+       uint32_t val;
  
-       /*
-        * FIXME should also account for plane ratio
-        * once 64bpp pixel formats are supported.
-        */
-       cdclk = skl_calc_cdclk(max_pixclk, vco);
+       DRM_DEBUG_KMS("Enabling package C8+\n");
  
-       /*
-        * FIXME move the cdclk calculation to
-        * compute_config() so we can fail gracefully.
-        */
-       if (cdclk > dev_priv->max_cdclk_freq) {
-               DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
-                         cdclk, dev_priv->max_cdclk_freq);
-               cdclk = dev_priv->max_cdclk_freq;
+       if (HAS_PCH_LPT_LP(dev_priv)) {
+               val = I915_READ(SOUTH_DSPCLK_GATE_D);
+               val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+               I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
        }
  
-       intel_state->cdclk = intel_state->dev_cdclk = cdclk;
-       if (!intel_state->active_crtcs)
-               intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
-       return 0;
+       lpt_disable_clkout_dp(dev_priv);
+       hsw_disable_lcpll(dev_priv, true, true);
  }
  
- static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = to_i915(old_state->dev);
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
-       unsigned int req_cdclk = intel_state->dev_cdclk;
-       unsigned int req_vco = intel_state->cdclk_pll_vco;
+       uint32_t val;
+       DRM_DEBUG_KMS("Disabling package C8+\n");
+       hsw_restore_lcpll(dev_priv);
+       lpt_init_pch_refclk(dev_priv);
  
-       skl_set_cdclk(dev_priv, req_cdclk, req_vco);
+       if (HAS_PCH_LPT_LP(dev_priv)) {
+               val = I915_READ(SOUTH_DSPCLK_GATE_D);
+               val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+               I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+       }
  }
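
As the comment block above spells out, PC8 entry and exit must mirror each
other: whatever hsw_enable_pc8() tears down, hsw_disable_pc8() restores in
reverse order. A deliberately toy standalone model of that round-trip
invariant; the bit names are invented, and only the pairing idea comes from
the code:

#include <assert.h>
#include <stdio.h>

/* Invented stand-ins for the three things the helpers above touch. */
enum { CLK_GATING = 1 << 0, CLKOUT_DP = 1 << 1, LCPLL = 1 << 2 };

static unsigned int hw = CLK_GATING | CLKOUT_DP | LCPLL;

static void enable_pc8(void)
{
	hw &= ~CLK_GATING;	/* LPT-LP clock gating workaround off */
	hw &= ~CLKOUT_DP;	/* lpt_disable_clkout_dp() */
	hw &= ~LCPLL;		/* hsw_disable_lcpll() */
}

static void disable_pc8(void)
{
	hw |= LCPLL;		/* hsw_restore_lcpll() */
	hw |= CLKOUT_DP;	/* lpt_init_pch_refclk() */
	hw |= CLK_GATING;	/* clock gating workaround back on */
}

int main(void)
{
	enable_pc8();
	assert(hw == 0);	/* everything off: PC8+ may be entered */
	disable_pc8();
	assert(hw == (CLK_GATING | CLKOUT_DP | LCPLL));	/* restored */
	printf("PC8 enter/exit round-trip ok\n");
	return 0;
}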
  
  static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
@@@ -10565,7 -8948,7 +8949,7 @@@ static void haswell_get_ddi_pll(struct 
  
  static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
-                                    unsigned long *power_domain_mask)
+                                    u64 *power_domain_mask)
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
-       *power_domain_mask |= BIT(power_domain);
+       *power_domain_mask |= BIT_ULL(power_domain);
  
        tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
  
  
  static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
-                                        unsigned long *power_domain_mask)
+                                        u64 *power_domain_mask)
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                        continue;
-               *power_domain_mask |= BIT(power_domain);
+               *power_domain_mask |= BIT_ULL(power_domain);
  
                /*
                 * The PLL needs to be enabled with a valid divider
@@@ -10674,7 -9057,7 +9058,7 @@@ static void haswell_get_ddi_port_state(
  
        port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
  
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_GEN9_BC(dev_priv))
                skylake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_LP(dev_priv))
                bxt_get_ddi_pll(dev_priv, port, pipe_config);
@@@ -10709,13 -9092,13 +9093,13 @@@ static bool haswell_get_pipe_config(str
  {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
-       unsigned long power_domain_mask;
+       u64 power_domain_mask;
        bool active;
  
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
-       power_domain_mask = BIT(power_domain);
+       power_domain_mask = BIT_ULL(power_domain);
  
        pipe_config->shared_dpll = NULL;
  
  
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
-               power_domain_mask |= BIT(power_domain);
+               power_domain_mask |= BIT_ULL(power_domain);
                if (INTEL_GEN(dev_priv) >= 9)
                        skylake_get_pfit_config(crtc, pipe_config);
                else
@@@ -10972,9 -9355,8 +9356,8 @@@ static struct drm_display_mode load_det
  };
  
  struct drm_framebuffer *
- __intel_framebuffer_create(struct drm_device *dev,
-                          struct drm_mode_fb_cmd2 *mode_cmd,
-                          struct drm_i915_gem_object *obj)
+ intel_framebuffer_create(struct drm_i915_gem_object *obj,
+                        struct drm_mode_fb_cmd2 *mode_cmd)
  {
        struct intel_framebuffer *intel_fb;
        int ret;
        if (!intel_fb)
                return ERR_PTR(-ENOMEM);
  
-       ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
+       ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
        if (ret)
                goto err;
  
        return ERR_PTR(ret);
  }
  
- static struct drm_framebuffer *
- intel_framebuffer_create(struct drm_device *dev,
-                        struct drm_mode_fb_cmd2 *mode_cmd,
-                        struct drm_i915_gem_object *obj)
- {
-       struct drm_framebuffer *fb;
-       int ret;
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ERR_PTR(ret);
-       fb = __intel_framebuffer_create(dev, mode_cmd, obj);
-       mutex_unlock(&dev->struct_mutex);
-       return fb;
- }
  static u32
  intel_framebuffer_pitch_for_width(int width, int bpp)
  {
@@@ -11045,7 -9410,7 +9411,7 @@@ intel_framebuffer_create_for_mode(struc
                                                                bpp);
        mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
  
-       fb = intel_framebuffer_create(dev, &mode_cmd, obj);
+       fb = intel_framebuffer_create(obj, &mode_cmd);
        if (IS_ERR(fb))
                i915_gem_object_put(obj);
  
@@@ -11324,7 -9689,7 +9690,7 @@@ void intel_release_load_detect_pipe(str
        if (!state)
                return;
  
 -      ret = drm_atomic_commit(state);
 +      ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
        if (ret)
                DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
        drm_atomic_state_put(state);
@@@ -11731,14 -10096,12 +10097,12 @@@ static int intel_gen2_queue_flip(struc
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
  {
-       struct intel_ring *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       u32 flip_mask;
-       int ret;
+       u32 flip_mask, *cs;
  
-       ret = intel_ring_begin(req, 6);
-       if (ret)
-               return ret;
+       cs = intel_ring_begin(req, 6);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
  
        /* Can't queue multiple flips, so wait for the previous
         * one to finish before executing the next.
                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
        else
                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-       intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_emit(ring, MI_DISPLAY_FLIP |
-                       MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       intel_ring_emit(ring, fb->pitches[0]);
-       intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-       intel_ring_emit(ring, 0); /* aux display base address, unused */
+       *cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+       *cs++ = MI_NOOP;
+       *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+       *cs++ = fb->pitches[0];
+       *cs++ = intel_crtc->flip_work->gtt_offset;
+       *cs++ = 0; /* aux display base address, unused */
  
        return 0;
  }
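
The conversion repeated across all of these queue_flip functions follows one
pattern: intel_ring_begin() now returns a pointer into the ring (or an ERR_PTR
on failure) and commands are written through *cs++ instead of a series of
intel_ring_emit() calls. A standalone sketch of that emission style against a
plain buffer; the opcode values are placeholders, not real MI_* encodings:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS	16
#define MI_NOOP		0x00000000u	/* placeholder encoding */
#define MI_DISPLAY_FLIP	0x01000000u	/* placeholder encoding */

static uint32_t ring[RING_DWORDS];
static unsigned int ring_tail;

/* Toy intel_ring_begin(): reserve n dwords or fail. */
static uint32_t *ring_begin(unsigned int n)
{
	if (ring_tail + n > RING_DWORDS)
		return NULL;	/* the kernel returns ERR_PTR(-ENOSPC) */
	ring_tail += n;
	return ring + ring_tail - n;
}

int main(void)
{
	uint32_t *cs = ring_begin(6);

	if (!cs)
		return ENOSPC;

	/* Emit exactly the six dwords reserved above. */
	*cs++ = MI_DISPLAY_FLIP;
	*cs++ = 4096;		/* fb pitch */
	*cs++ = 0x00100000;	/* gtt offset */
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	printf("emitted %u dwords\n", ring_tail);
	return 0;
}
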
@@@ -11765,26 -10127,23 +10128,23 @@@ static int intel_gen3_queue_flip(struc
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
  {
-       struct intel_ring *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       u32 flip_mask;
-       int ret;
+       u32 flip_mask, *cs;
  
-       ret = intel_ring_begin(req, 6);
-       if (ret)
-               return ret;
+       cs = intel_ring_begin(req, 6);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
  
        if (intel_crtc->plane)
                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
        else
                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-       intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
-                       MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       intel_ring_emit(ring, fb->pitches[0]);
-       intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-       intel_ring_emit(ring, MI_NOOP);
+       *cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+       *cs++ = MI_NOOP;
+       *cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+       *cs++ = fb->pitches[0];
+       *cs++ = intel_crtc->flip_work->gtt_offset;
+       *cs++ = MI_NOOP;
  
        return 0;
  }
@@@ -11796,25 -10155,22 +10156,22 @@@ static int intel_gen4_queue_flip(struc
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
  {
-       struct intel_ring *ring = req->ring;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       uint32_t pf, pipesrc;
-       int ret;
+       u32 pf, pipesrc, *cs;
  
-       ret = intel_ring_begin(req, 4);
-       if (ret)
-               return ret;
+       cs = intel_ring_begin(req, 4);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
  
        /* i965+ uses the linear or tiled offsets from the
         * Display Registers (which do not change across a page-flip)
         * so we need only reprogram the base address.
         */
-       intel_ring_emit(ring, MI_DISPLAY_FLIP |
-                       MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       intel_ring_emit(ring, fb->pitches[0]);
-       intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
-                       intel_fb_modifier_to_tiling(fb->modifier));
+       *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+       *cs++ = fb->pitches[0];
+       *cs++ = intel_crtc->flip_work->gtt_offset |
+               intel_fb_modifier_to_tiling(fb->modifier);
  
        /* XXX Enabling the panel-fitter across page-flip is so far
         * untested on non-native modes, so ignore it for now.
         */
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-       intel_ring_emit(ring, pf | pipesrc);
+       *cs++ = pf | pipesrc;
  
        return 0;
  }
@@@ -11834,21 -10190,17 +10191,17 @@@ static int intel_gen6_queue_flip(struc
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
  {
-       struct intel_ring *ring = req->ring;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       uint32_t pf, pipesrc;
-       int ret;
+       u32 pf, pipesrc, *cs;
  
-       ret = intel_ring_begin(req, 4);
-       if (ret)
-               return ret;
+       cs = intel_ring_begin(req, 4);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
  
-       intel_ring_emit(ring, MI_DISPLAY_FLIP |
-                       MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       intel_ring_emit(ring, fb->pitches[0] |
-                       intel_fb_modifier_to_tiling(fb->modifier));
-       intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+       *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+       *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+       *cs++ = intel_crtc->flip_work->gtt_offset;
  
        /* Contrary to the suggestions in the documentation,
         * "Enable Panel Fitter" does not seem to be required when page
         */
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-       intel_ring_emit(ring, pf | pipesrc);
+       *cs++ = pf | pipesrc;
  
        return 0;
  }
@@@ -11871,9 -10223,8 +10224,8 @@@ static int intel_gen7_queue_flip(struc
                                 uint32_t flags)
  {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_ring *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       uint32_t plane_bit = 0;
+       u32 *cs, plane_bit = 0;
        int len, ret;
  
        switch (intel_crtc->plane) {
        if (ret)
                return ret;
  
-       ret = intel_ring_begin(req, len);
-       if (ret)
-               return ret;
+       cs = intel_ring_begin(req, len);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
  
        /* Unmask the flip-done completion message. Note that the bspec says that
         * we should do this for both the BCS and RCS, and that we must not unmask
         * to zero does lead to lockups within MI_DISPLAY_FLIP.
         */
        if (req->engine->id == RCS) {
-               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(ring, DERRMR);
-               intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-                                         DERRMR_PIPEB_PRI_FLIP_DONE |
-                                         DERRMR_PIPEC_PRI_FLIP_DONE));
+               *cs++ = MI_LOAD_REGISTER_IMM(1);
+               *cs++ = i915_mmio_reg_offset(DERRMR);
+               *cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+                         DERRMR_PIPEB_PRI_FLIP_DONE |
+                         DERRMR_PIPEC_PRI_FLIP_DONE);
                if (IS_GEN8(dev_priv))
-                       intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
-                                             MI_SRM_LRM_GLOBAL_GTT);
+                       *cs++ = MI_STORE_REGISTER_MEM_GEN8 |
+                               MI_SRM_LRM_GLOBAL_GTT;
                else
-                       intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
-                                             MI_SRM_LRM_GLOBAL_GTT);
-               intel_ring_emit_reg(ring, DERRMR);
-               intel_ring_emit(ring,
-                               i915_ggtt_offset(req->engine->scratch) + 256);
+                       *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+               *cs++ = i915_mmio_reg_offset(DERRMR);
+               *cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
                if (IS_GEN8(dev_priv)) {
-                       intel_ring_emit(ring, 0);
-                       intel_ring_emit(ring, MI_NOOP);
+                       *cs++ = 0;
+                       *cs++ = MI_NOOP;
                }
        }
  
-       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-       intel_ring_emit(ring, fb->pitches[0] |
-                       intel_fb_modifier_to_tiling(fb->modifier));
-       intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-       intel_ring_emit(ring, (MI_NOOP));
+       *cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
+       *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+       *cs++ = intel_crtc->flip_work->gtt_offset;
+       *cs++ = MI_NOOP;
  
        return 0;
  }
@@@ -12147,6 -10495,7 +10496,7 @@@ void intel_check_page_flip(struct drm_i
        spin_unlock(&dev->event_lock);
  }
  
+ __maybe_unused
  static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event,
@@@ -12441,11 -10790,11 +10791,11 @@@ int intel_plane_atomic_calc_changes(str
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
        struct drm_crtc *crtc = crtc_state->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_plane *plane = plane_state->plane;
+       struct intel_plane *plane = to_intel_plane(plane_state->plane);
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane_state *old_plane_state =
-               to_intel_plane_state(plane->state);
+               to_intel_plane_state(plane->base.state);
        bool mode_changed = needs_modeset(crtc_state);
        bool was_crtc_enabled = crtc->state->active;
        bool is_crtc_enabled = crtc_state->active;
        struct drm_framebuffer *fb = plane_state->fb;
        int ret;
  
-       if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
+       if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(
                        to_intel_crtc_state(crtc_state),
                        to_intel_plane_state(plane_state));
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
-       if (!is_crtc_enabled)
+       if (!is_crtc_enabled) {
                plane_state->visible = visible = false;
+               to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
+       }
  
        if (!was_visible && !visible)
                return 0;
        turn_on = visible && (!was_visible || mode_changed);
  
        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
-                        intel_crtc->base.base.id,
-                        intel_crtc->base.name,
-                        plane->base.id, plane->name,
+                        intel_crtc->base.base.id, intel_crtc->base.name,
+                        plane->base.base.id, plane->base.name,
                         fb ? fb->base.id : -1);
  
        DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
-                        plane->base.id, plane->name,
+                        plane->base.base.id, plane->base.name,
                         was_visible, visible,
                         turn_off, turn_on, mode_changed);
  
        if (turn_on) {
-               pipe_config->update_wm_pre = true;
+               if (INTEL_GEN(dev_priv) < 5)
+                       pipe_config->update_wm_pre = true;
  
                /* must disable cxsr around plane enable/disable */
-               if (plane->type != DRM_PLANE_TYPE_CURSOR)
+               if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (turn_off) {
-               pipe_config->update_wm_post = true;
+               if (INTEL_GEN(dev_priv) < 5)
+                       pipe_config->update_wm_post = true;
  
                /* must disable cxsr around plane enable/disable */
-               if (plane->type != DRM_PLANE_TYPE_CURSOR)
+               if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
-       } else if (intel_wm_need_update(plane, plane_state)) {
-               /* FIXME bollocks */
-               pipe_config->update_wm_pre = true;
-               pipe_config->update_wm_post = true;
+       } else if (intel_wm_need_update(&plane->base, plane_state)) {
+               if (INTEL_GEN(dev_priv) < 5) {
+                       /* FIXME bollocks */
+                       pipe_config->update_wm_pre = true;
+                       pipe_config->update_wm_post = true;
+               }
        }
  
-       /* Pre-gen9 platforms need two-step watermark updates */
-       if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
-           INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks)
-               to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
        if (visible || was_visible)
-               pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
+               pipe_config->fb_bits |= plane->frontbuffer_bit;
  
        /*
         * WaCxSRDisabledForSpriteScaling:ivb
         * cstate->update_wm was already set above, so this flag will
         * take effect when we commit and program watermarks.
         */
-       if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
+       if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
            needs_scaling(to_intel_plane_state(plane_state)) &&
            !needs_scaling(old_plane_state))
                pipe_config->disable_lp_wm = true;
@@@ -12642,7 -10991,7 +10992,7 @@@ static int intel_crtc_atomic_check(stru
                        ret = skl_update_scaler_crtc(pipe_config);
  
                if (!ret)
-                       ret = intel_atomic_setup_scalers(dev, intel_crtc,
+                       ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
                                                         pipe_config);
        }
  
@@@ -12800,9 -11149,10 +11150,10 @@@ static void intel_dump_pipe_config(stru
        DRM_DEBUG_KMS("adjusted mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
        intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
-       DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n",
+       DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
                      pipe_config->port_clock,
-                     pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+                     pipe_config->pipe_src_w, pipe_config->pipe_src_h,
+                     pipe_config->pixel_rate);
  
        if (INTEL_GEN(dev_priv) >= 9)
                DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
@@@ -12920,10 -11270,13 +11271,13 @@@ static bool check_digital_port_conflict
  static void
  clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
  {
+       struct drm_i915_private *dev_priv =
+               to_i915(crtc_state->base.crtc->dev);
        struct drm_crtc_state tmp_state;
        struct intel_crtc_scaler_state scaler_state;
        struct intel_dpll_hw_state dpll_hw_state;
        struct intel_shared_dpll *shared_dpll;
+       struct intel_crtc_wm_state wm_state;
        bool force_thru;
  
        /* FIXME: before the switch to atomic started, a new pipe_config was
        shared_dpll = crtc_state->shared_dpll;
        dpll_hw_state = crtc_state->dpll_hw_state;
        force_thru = crtc_state->pch_pfit.force_thru;
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               wm_state = crtc_state->wm;
  
        memset(crtc_state, 0, sizeof *crtc_state);
  
        crtc_state->shared_dpll = shared_dpll;
        crtc_state->dpll_hw_state = dpll_hw_state;
        crtc_state->pch_pfit.force_thru = force_thru;
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               crtc_state->wm = wm_state;
  }
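
clear_intel_crtc_state() above is the classic save/memset/restore idiom: copy
out the few fields that must survive a fresh compute pass, zero the whole
structure, then put them back (conditionally, for the vlv/chv watermark
state). A minimal standalone illustration with an invented structure:

#include <assert.h>
#include <string.h>

struct state {
	int mode;		/* recomputed from scratch each pass */
	int shared_dpll;	/* must survive the clear */
	int wm;			/* survives only on some platforms */
};

static void clear_state(struct state *s, int keep_wm)
{
	int dpll = s->shared_dpll;	/* save */
	int wm = s->wm;

	memset(s, 0, sizeof(*s));	/* wipe everything else */

	s->shared_dpll = dpll;		/* restore */
	if (keep_wm)
		s->wm = wm;
}

int main(void)
{
	struct state s = { .mode = 3, .shared_dpll = 7, .wm = 9 };

	clear_state(&s, 1);
	assert(s.mode == 0 && s.shared_dpll == 7 && s.wm == 9);
	return 0;
}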
  
  static int
@@@ -13060,8 -11417,11 +11418,11 @@@ encoder_retry
        }
  
        /* Dithering seems to not pass-through bits correctly when it should, so
-        * only enable it on 6bpc panels. */
-       pipe_config->dither = pipe_config->pipe_bpp == 6*3;
+        * only enable it on 6bpc panels and when it's not a compliance
+        * test requesting a 6bpc video pattern.
+        */
+       pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
+               !pipe_config->dither_force_disable;
        DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
  
@@@ -13375,6 -11735,7 +11736,7 @@@ intel_pipe_config_compare(struct drm_i9
                }
  
                PIPE_CONF_CHECK_I(scaler_state.scaler_id);
+               PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
        }
  
        /* BDW+ don't expose a synchronous way to read the state */
@@@ -13666,6 -12027,8 +12028,8 @@@ verify_crtc_state(struct drm_crtc *crtc
                }
        }
  
+       intel_crtc_compute_pixel_rate(pipe_config);
        if (!new_crtc_state->active)
                return;
  
@@@ -13993,6 -12356,8 +12357,8 @@@ static int intel_modeset_checks(struct 
  
        intel_state->modeset = true;
        intel_state->active_crtcs = dev_priv->active_crtcs;
+       intel_state->cdclk.logical = dev_priv->cdclk.logical;
+       intel_state->cdclk.actual = dev_priv->cdclk.actual;
  
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                if (crtc_state->active)
         * adjusted_mode bits in the crtc directly.
         */
        if (dev_priv->display.modeset_calc_cdclk) {
-               if (!intel_state->cdclk_pll_vco)
-                       intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
-               if (!intel_state->cdclk_pll_vco)
-                       intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
                ret = dev_priv->display.modeset_calc_cdclk(state);
                if (ret < 0)
                        return ret;
  
                /*
-                * Writes to dev_priv->atomic_cdclk_freq must protected by
+                * Writes to dev_priv->cdclk.logical must be protected by
                 * holding all the crtc locks, even if we don't end up
                 * touching the hardware
                 */
-               if (intel_state->cdclk != dev_priv->atomic_cdclk_freq) {
+               if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical,
+                                              &intel_state->cdclk.logical)) {
                        ret = intel_lock_all_pipes(state);
                        if (ret < 0)
                                return ret;
                }
  
                /* All pipes must be switched off while we change the cdclk. */
-               if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
-                   intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) {
+               if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual,
+                                              &intel_state->cdclk.actual)) {
                        ret = intel_modeset_all_pipes(state);
                        if (ret < 0)
                                return ret;
                }
  
-               DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
-                             intel_state->cdclk, intel_state->dev_cdclk);
+               DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+                             intel_state->cdclk.logical.cdclk,
+                             intel_state->cdclk.actual.cdclk);
        } else {
-               to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
+               to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
        }
  
        intel_modeset_clear_plls(state);
@@@ -14146,7 -12508,7 +12509,7 @@@ static int intel_atomic_check(struct dr
                if (ret)
                        return ret;
        } else {
-               intel_state->cdclk = dev_priv->atomic_cdclk_freq;
+               intel_state->cdclk.logical = dev_priv->cdclk.logical;
        }
  
        ret = drm_atomic_helper_check_planes(dev, state);
@@@ -14253,12 -12615,7 +12616,7 @@@ static bool needs_vblank_wait(struct in
        if (crtc_state->update_wm_post)
                return true;
  
-       /*
-        * cxsr is re-enabled after vblank.
-        * This is already handled by crtc_state->update_wm_post,
-        * but added for clarity.
-        */
-       if (crtc_state->disable_cxsr)
+       if (crtc_state->wm.need_postvbl_update)
                return true;
  
        return false;
@@@ -14380,6 -12737,24 +12738,24 @@@ static void skl_update_crtcs(struct drm
        } while (progress);
  }
  
+ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+ {
+       struct intel_atomic_state *state, *next;
+       struct llist_node *freed;
+       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+       llist_for_each_entry_safe(state, next, freed, freed)
+               drm_atomic_state_put(&state->base);
+ }
+ static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+ {
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+       intel_atomic_helper_free_state(dev_priv);
+ }
  static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  {
        struct drm_device *dev = state->dev;
        struct drm_crtc *crtc;
        struct intel_crtc_state *intel_cstate;
        bool hw_check = intel_state->modeset;
-       unsigned long put_domains[I915_MAX_PIPES] = {};
+       u64 put_domains[I915_MAX_PIPES] = {};
        unsigned crtc_vblank_mask = 0;
        int i;
  
                                /*
                                 * Make sure we don't call initial_watermarks
                                 * for ILK-style watermark updates.
+                                *
+                                * No clue what this is supposed to achieve.
                                 */
-                               if (dev_priv->display.atomic_update_watermarks)
+                               if (INTEL_GEN(dev_priv) >= 9)
                                        dev_priv->display.initial_watermarks(intel_state,
                                                                             to_intel_crtc_state(crtc->state));
-                               else
-                                       intel_update_watermarks(intel_crtc);
                        }
                }
        }
        if (intel_state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
  
-               if (dev_priv->display.modeset_commit_cdclk &&
-                   (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
-                    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
-                       dev_priv->display.modeset_commit_cdclk(state);
+               intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
  
                /*
                 * SKL workaround: bspec recommends we disable the SAGV when we
         * can happen also when the device is completely off.
         */
        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+       intel_atomic_helper_free_state(dev_priv);
  }
  
  static void intel_atomic_commit_work(struct work_struct *work)
@@@ -14615,6 -12989,17 +12990,17 @@@ static int intel_atomic_commit(struct d
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
  
+       /*
+        * The intel_legacy_cursor_update() fast path takes care
+        * of avoiding the vblank waits for simple cursor
+        * movement and flips. For cursor on/off and size changes,
+        * we want to perform the vblank waits so that watermark
+        * updates happen during the correct frames. Gen9+ have
+        * double buffered watermarks and so shouldn't need this.
+        */
+       if (INTEL_GEN(dev_priv) < 9)
+               state->legacy_cursor_update = false;
        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (ret)
                return ret;
                memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
                       sizeof(intel_state->min_pixclk));
                dev_priv->active_crtcs = intel_state->active_crtcs;
-               dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+               dev_priv->cdclk.logical = intel_state->cdclk.logical;
+               dev_priv->cdclk.actual = intel_state->cdclk.actual;
        }
  
        drm_atomic_state_get(state);
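
The gen check explained in the comment above boils down to: on pre-gen9
hardware, even a "legacy cursor" commit must take the vblank-synchronized path
unless it is pure movement. A toy standalone sketch of that gating decision;
the function and flag names here are invented:

#include <stdbool.h>
#include <stdio.h>

static bool cursor_fast_path_ok(int gen, bool size_or_fb_changed)
{
	/* Gen9+ watermarks are double buffered; the fast path is safe. */
	if (gen >= 9)
		return true;
	/* Pre-gen9: only simple movement may skip the vblank wait. */
	return !size_or_fb_changed;
}

int main(void)
{
	printf("gen7 move:   %d\n", cursor_fast_path_ok(7, false)); /* 1 */
	printf("gen7 resize: %d\n", cursor_fast_path_ok(7, true));  /* 0 */
	printf("gen9 resize: %d\n", cursor_fast_path_ok(9, true));  /* 1 */
	return 0;
}
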
@@@ -14739,7 -13125,7 +13126,7 @@@ static const struct drm_crtc_funcs inte
        .set_config = drm_atomic_helper_set_config,
        .set_property = drm_atomic_helper_crtc_set_property,
        .destroy = intel_crtc_destroy,
-       .page_flip = intel_crtc_page_flip,
+       .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = intel_crtc_duplicate_state,
        .atomic_destroy_state = intel_crtc_destroy_state,
        .set_crc_source = intel_crtc_set_crc_source,
@@@ -14771,6 -13157,29 +13158,29 @@@ intel_prepare_plane_fb(struct drm_plan
        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
        int ret;
  
+       if (obj) {
+               if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+                   INTEL_INFO(dev_priv)->cursor_needs_physical) {
+                       const int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
+                       ret = i915_gem_object_attach_phys(obj, align);
+                       if (ret) {
+                               DRM_DEBUG_KMS("failed to attach phys object\n");
+                               return ret;
+                       }
+               } else {
+                       struct i915_vma *vma;
+                       vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+                       if (IS_ERR(vma)) {
+                               DRM_DEBUG_KMS("failed to pin object\n");
+                               return PTR_ERR(vma);
+                       }
+                       to_intel_plane_state(new_state)->vma = vma;
+               }
+       }
        if (!obj && !old_obj)
                return 0;
  
                i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
        }
  
-       if (plane->type == DRM_PLANE_TYPE_CURSOR &&
-           INTEL_INFO(dev_priv)->cursor_needs_physical) {
-               int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
-               ret = i915_gem_object_attach_phys(obj, align);
-               if (ret) {
-                       DRM_DEBUG_KMS("failed to attach phys object\n");
-                       return ret;
-               }
-       } else {
-               struct i915_vma *vma;
-               vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
-               if (IS_ERR(vma)) {
-                       DRM_DEBUG_KMS("failed to pin object\n");
-                       return PTR_ERR(vma);
-               }
-               to_intel_plane_state(new_state)->vma = vma;
-       }
        return 0;
  }
  
@@@ -14870,16 -13259,22 +13260,22 @@@ intel_cleanup_plane_fb(struct drm_plan
  int
  skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
  {
+       struct drm_i915_private *dev_priv;
        int max_scale;
-       int crtc_clock, cdclk;
+       int crtc_clock, max_dotclk;
  
        if (!intel_crtc || !crtc_state->base.enable)
                return DRM_PLANE_HELPER_NO_SCALING;
  
+       dev_priv = to_i915(intel_crtc->base.dev);
        crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
-       cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
+       max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
  
-       if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
+       if (IS_GEMINILAKE(dev_priv))
+               max_dotclk *= 2;
+       if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
                return DRM_PLANE_HELPER_NO_SCALING;
  
        /*
         *            or
         *    cdclk/crtc_clock
         */
-       max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
+       max_scale = min((1 << 16) * 3 - 1,
+                       (1 << 8) * ((max_dotclk << 8) / crtc_clock));
  
        return max_scale;
  }
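
The arithmetic in skl_max_scale() above is 16.16 fixed point: the constant
(1 << 16) * 3 - 1 caps the downscale just under 3.0x, and the expression
(1 << 8) * ((max_dotclk << 8) / crtc_clock) is the dotclk/pixel-clock ratio in
the same format. A worked standalone example with invented clocks:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int max_dotclk = 540000;	/* hypothetical, kHz */
	int crtc_clock = 300000;	/* hypothetical, kHz */

	/* 16.16 fixed point: cap at ~3.0x or at dotclk/crtc_clock. */
	int max_scale = MIN((1 << 16) * 3 - 1,
			    (1 << 8) * ((max_dotclk << 8) / crtc_clock));

	/* 540000/300000 = 1.8 -> 117760, i.e. ~1.80x in 16.16 format. */
	printf("max_scale = %d (~%.2fx)\n", max_scale, max_scale / 65536.0);
	return 0;
}
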
@@@ -15038,8 -13434,7 +13435,7 @@@ intel_legacy_cursor_update(struct drm_p
            old_plane_state->src_h != src_h ||
            old_plane_state->crtc_w != crtc_w ||
            old_plane_state->crtc_h != crtc_h ||
-           !old_plane_state->visible ||
-           old_plane_state->fb->modifier != fb->modifier)
+           !old_plane_state->fb != !fb)
                goto slow;
  
        new_plane_state = intel_plane_duplicate_state(plane);
        if (ret)
                goto out_free;
  
-       /* Visibility changed, must take slowpath. */
-       if (!new_plane_state->visible)
-               goto slow_free;
        ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
        if (ret)
                goto out_free;
        new_plane_state->fb = old_fb;
        to_intel_plane_state(new_plane_state)->vma = old_vma;
  
-       intel_plane->update_plane(plane,
-                                 to_intel_crtc_state(crtc->state),
-                                 to_intel_plane_state(plane->state));
+       if (plane->state->visible) {
+               trace_intel_update_plane(plane, to_intel_crtc(crtc));
+               intel_plane->update_plane(plane,
+                                         to_intel_crtc_state(crtc->state),
+                                         to_intel_plane_state(plane->state));
+       } else {
+               trace_intel_disable_plane(plane, to_intel_crtc(crtc));
+               intel_plane->disable_plane(plane, crtc);
+       }
  
        intel_cleanup_plane_fb(plane, new_plane_state);
  
@@@ -15117,8 -13514,6 +13515,6 @@@ out_free
        intel_plane_destroy_state(plane, new_plane_state);
        return ret;
  
- slow_free:
-       intel_plane_destroy_state(plane, new_plane_state);
  slow:
        return drm_atomic_helper_update_plane(plane, crtc, fb,
                                              crtc_x, crtc_y, crtc_w, crtc_h,
@@@ -15492,8 -13887,6 +13888,6 @@@ static int intel_crtc_init(struct drm_i
        intel_crtc->cursor_cntl = ~0;
        intel_crtc->cursor_size = ~0;
  
-       intel_crtc->wm.cxsr_allowed = true;
        /* initialize shared scalers */
        intel_crtc_init_scalers(intel_crtc, crtc_state);
  
@@@ -15681,7 -14074,7 +14075,7 @@@ static void intel_setup_outputs(struct 
                 */
                found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
                /* WaIgnoreDDIAStrap: skl */
-               if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+               if (found || IS_GEN9_BC(dev_priv))
                        intel_ddi_init(dev_priv, PORT_A);
  
                /* DDI B, C and D detection is indicated by the SFUSE_STRAP
                /*
                 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
                 */
-               if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+               if (IS_GEN9_BC(dev_priv) &&
                    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
                     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
                     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
  
  static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
  {
-       struct drm_device *dev = fb->dev;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  
        drm_framebuffer_cleanup(fb);
-       mutex_lock(&dev->struct_mutex);
+       i915_gem_object_lock(intel_fb->obj);
        WARN_ON(!intel_fb->obj->framebuffer_references--);
+       i915_gem_object_unlock(intel_fb->obj);
        i915_gem_object_put(intel_fb->obj);
-       mutex_unlock(&dev->struct_mutex);
        kfree(intel_fb);
  }
  
@@@ -15862,15 -14257,10 +14258,10 @@@ static int intel_user_framebuffer_dirty
                                        struct drm_clip_rect *clips,
                                        unsigned num_clips)
  {
-       struct drm_device *dev = fb->dev;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  
-       mutex_lock(&dev->struct_mutex);
-       if (obj->pin_display && obj->cache_dirty)
-               i915_gem_clflush_object(obj, true);
-       intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
-       mutex_unlock(&dev->struct_mutex);
+       i915_gem_object_flush_if_display(obj);
+       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
  
        return 0;
  }
@@@ -15885,7 -14275,7 +14276,7 @@@ stati
  u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
                         uint64_t fb_modifier, uint32_t pixel_format)
  {
-       u32 gen = INTEL_INFO(dev_priv)->gen;
+       u32 gen = INTEL_GEN(dev_priv);
  
        if (gen >= 9) {
                int cpp = drm_format_plane_cpp(pixel_format, 0);
                 *  pixels and 32K bytes."
                 */
                return min(8192 * cpp, 32768);
-       } else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
-                  !IS_CHERRYVIEW(dev_priv)) {
+       } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
                return 32*1024;
        } else if (gen >= 4) {
                if (fb_modifier == I915_FORMAT_MOD_X_TILED)
        }
  }
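
For the gen9+ branch above, the pitch cap is the tighter of 8192 pixels and
32K bytes, so the byte limit only bites at 4 bytes per pixel. A quick
standalone check, with pixel formats reduced to a bare cpp value:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int cpp;

	/* gen9+ rule: at most 8192 pixels and 32K bytes per row. */
	for (cpp = 1; cpp <= 4; cpp *= 2)
		printf("cpp=%d -> pitch limit %d bytes\n",
		       cpp, MIN(8192 * cpp, 32768));
	/* cpp=1: 8192, cpp=2: 16384, cpp=4: 32768 (byte cap bites). */
	return 0;
}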
  
- static int intel_framebuffer_init(struct drm_device *dev,
-                                 struct intel_framebuffer *intel_fb,
-                                 struct drm_mode_fb_cmd2 *mode_cmd,
-                                 struct drm_i915_gem_object *obj)
+ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
+                                 struct drm_i915_gem_object *obj,
+                                 struct drm_mode_fb_cmd2 *mode_cmd)
  {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       unsigned int tiling = i915_gem_object_get_tiling(obj);
-       int ret;
-       u32 pitch_limit, stride_alignment;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_format_name_buf format_name;
+       u32 pitch_limit, stride_alignment;
+       unsigned int tiling, stride;
+       int ret = -EINVAL;
  
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       i915_gem_object_lock(obj);
+       obj->framebuffer_references++;
+       tiling = i915_gem_object_get_tiling(obj);
+       stride = i915_gem_object_get_stride(obj);
+       i915_gem_object_unlock(obj);
  
        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
                /*
                if (tiling != I915_TILING_NONE &&
                    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                        DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
-                       return -EINVAL;
+                       goto err;
                }
        } else {
                if (tiling == I915_TILING_X) {
                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
                } else if (tiling == I915_TILING_Y) {
                        DRM_DEBUG("No Y tiling for legacy addfb\n");
-                       return -EINVAL;
+                       goto err;
                }
        }
  
                if (INTEL_GEN(dev_priv) < 9) {
                        DRM_DEBUG("Unsupported tiling 0x%llx!\n",
                                  mode_cmd->modifier[0]);
-                       return -EINVAL;
+                       goto err;
                }
        case DRM_FORMAT_MOD_NONE:
        case I915_FORMAT_MOD_X_TILED:
        default:
                DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
                          mode_cmd->modifier[0]);
-               return -EINVAL;
+               goto err;
        }
  
        /*
        if (INTEL_INFO(dev_priv)->gen < 4 &&
            tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n");
-               return -EINVAL;
+               goto err;
        }
  
        stride_alignment = intel_fb_stride_alignment(dev_priv,
        if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
                DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
                          mode_cmd->pitches[0], stride_alignment);
-               return -EINVAL;
+               goto err;
        }
  
        pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
                          mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
                          "tiled" : "linear",
                          mode_cmd->pitches[0], pitch_limit);
-               return -EINVAL;
+               goto err;
        }
  
        /*
         * If there's a fence, enforce that
         * the fb pitch and fence stride match.
         */
-       if (tiling != I915_TILING_NONE &&
-           mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
+       if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
                DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
-                         mode_cmd->pitches[0],
-                         i915_gem_object_get_stride(obj));
-               return -EINVAL;
+                         mode_cmd->pitches[0], stride);
+               goto err;
        }
  
        /* Reject formats not supported by any plane early. */
                if (INTEL_GEN(dev_priv) > 3) {
                        DRM_DEBUG("unsupported pixel format: %s\n",
                                  drm_get_format_name(mode_cmd->pixel_format, &format_name));
-                       return -EINVAL;
+                       goto err;
                }
                break;
        case DRM_FORMAT_ABGR8888:
                    INTEL_GEN(dev_priv) < 9) {
                        DRM_DEBUG("unsupported pixel format: %s\n",
                                  drm_get_format_name(mode_cmd->pixel_format, &format_name));
-                       return -EINVAL;
+                       goto err;
                }
                break;
        case DRM_FORMAT_XBGR8888:
                if (INTEL_GEN(dev_priv) < 4) {
                        DRM_DEBUG("unsupported pixel format: %s\n",
                                  drm_get_format_name(mode_cmd->pixel_format, &format_name));
-                       return -EINVAL;
+                       goto err;
                }
                break;
        case DRM_FORMAT_ABGR2101010:
                if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                        DRM_DEBUG("unsupported pixel format: %s\n",
                                  drm_get_format_name(mode_cmd->pixel_format, &format_name));
-                       return -EINVAL;
+                       goto err;
                }
                break;
        case DRM_FORMAT_YUYV:
                if (INTEL_GEN(dev_priv) < 5) {
                        DRM_DEBUG("unsupported pixel format: %s\n",
                                  drm_get_format_name(mode_cmd->pixel_format, &format_name));
-                       return -EINVAL;
+                       goto err;
                }
                break;
        default:
                DRM_DEBUG("unsupported pixel format: %s\n",
                          drm_get_format_name(mode_cmd->pixel_format, &format_name));
-               return -EINVAL;
+               goto err;
        }
  
        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
        if (mode_cmd->offsets[0] != 0)
-               return -EINVAL;
+               goto err;
  
-       drm_helper_mode_fill_fb_struct(dev, &intel_fb->base, mode_cmd);
+       drm_helper_mode_fill_fb_struct(&dev_priv->drm,
+                                      &intel_fb->base, mode_cmd);
        intel_fb->obj = obj;
  
        ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
        if (ret)
-               return ret;
+               goto err;
  
-       ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+       ret = drm_framebuffer_init(obj->base.dev,
+                                  &intel_fb->base,
+                                  &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
-               return ret;
+               goto err;
        }
  
-       intel_fb->obj->framebuffer_references++;
        return 0;
+ err:
+       i915_gem_object_lock(obj);
+       obj->framebuffer_references--;
+       i915_gem_object_unlock(obj);
+       return ret;
  }
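
The restructure above takes the framebuffer reference up front, funnels every validation failure through a single err label, and drops the reference exactly once there. A self-contained sketch of that goto-based cleanup idiom (simplified stand-in types, not the kernel code):

#include <errno.h>
#include <stdio.h>

struct obj {
        int framebuffer_references;
};

static int fb_init(struct obj *o, int bad_modifier)
{
        int ret = -EINVAL;

        o->framebuffer_references++;    /* taken before any check */

        if (bad_modifier)
                goto err;               /* every check shares this exit */

        return 0;
err:
        o->framebuffer_references--;    /* undone exactly once */
        return ret;
}

int main(void)
{
        struct obj o = { 0 };

        printf("ret=%d refs=%d\n", fb_init(&o, 1), o.framebuffer_references);
        return 0;
}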
  
  static struct drm_framebuffer *
@@@ -16093,7 -14490,7 +14491,7 @@@ intel_user_framebuffer_create(struct dr
        if (!obj)
                return ERR_PTR(-ENOENT);
  
-       fb = intel_framebuffer_create(dev, &mode_cmd, obj);
+       fb = intel_framebuffer_create(obj, &mode_cmd);
        if (IS_ERR(fb))
                i915_gem_object_put(obj);
  
@@@ -16127,6 -14524,8 +14525,8 @@@ static const struct drm_mode_config_fun
   */
  void intel_init_display_hooks(struct drm_i915_private *dev_priv)
  {
+       intel_init_cdclk_hooks(dev_priv);
        if (INTEL_INFO(dev_priv)->gen >= 9) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        }
  
-       /* Returns the core display clock speed */
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       skylake_get_display_clock_speed;
-       else if (IS_GEN9_LP(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       broxton_get_display_clock_speed;
-       else if (IS_BROADWELL(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       broadwell_get_display_clock_speed;
-       else if (IS_HASWELL(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       haswell_get_display_clock_speed;
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       valleyview_get_display_clock_speed;
-       else if (IS_GEN5(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       ilk_get_display_clock_speed;
-       else if (IS_I945G(dev_priv) || IS_I965G(dev_priv) ||
-                IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       i945_get_display_clock_speed;
-       else if (IS_GM45(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       gm45_get_display_clock_speed;
-       else if (IS_I965GM(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       i965gm_get_display_clock_speed;
-       else if (IS_PINEVIEW(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       pnv_get_display_clock_speed;
-       else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       g33_get_display_clock_speed;
-       else if (IS_I915G(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       i915_get_display_clock_speed;
-       else if (IS_I945GM(dev_priv) || IS_I845G(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       i9xx_misc_get_display_clock_speed;
-       else if (IS_I915GM(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       i915gm_get_display_clock_speed;
-       else if (IS_I865G(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       i865_get_display_clock_speed;
-       else if (IS_I85X(dev_priv))
-               dev_priv->display.get_display_clock_speed =
-                       i85x_get_display_clock_speed;
-       else { /* 830 */
-               WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
-               dev_priv->display.get_display_clock_speed =
-                       i830_get_display_clock_speed;
-       }
        if (IS_GEN5(dev_priv)) {
                dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
        } else if (IS_GEN6(dev_priv)) {
                dev_priv->display.fdi_link_train = hsw_fdi_link_train;
        }
  
-       if (IS_BROADWELL(dev_priv)) {
-               dev_priv->display.modeset_commit_cdclk =
-                       broadwell_modeset_commit_cdclk;
-               dev_priv->display.modeset_calc_cdclk =
-                       broadwell_modeset_calc_cdclk;
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               dev_priv->display.modeset_commit_cdclk =
-                       valleyview_modeset_commit_cdclk;
-               dev_priv->display.modeset_calc_cdclk =
-                       valleyview_modeset_calc_cdclk;
-       } else if (IS_GEN9_LP(dev_priv)) {
-               dev_priv->display.modeset_commit_cdclk =
-                       bxt_modeset_commit_cdclk;
-               dev_priv->display.modeset_calc_cdclk =
-                       bxt_modeset_calc_cdclk;
-       } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-               dev_priv->display.modeset_commit_cdclk =
-                       skl_modeset_commit_cdclk;
-               dev_priv->display.modeset_calc_cdclk =
-                       skl_modeset_calc_cdclk;
-       }
        if (dev_priv->info.gen >= 9)
                dev_priv->display.update_crtcs = skl_update_crtcs;
        else
@@@ -16510,8 -14831,7 +14832,7 @@@ void intel_modeset_init_hw(struct drm_d
        struct drm_i915_private *dev_priv = to_i915(dev);
  
        intel_update_cdclk(dev_priv);
-       dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
+       dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
  
        intel_init_clock_gating(dev_priv);
  }
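
After the hardware cdclk state is read out, both software copies are seeded from it; the logical and actual states only diverge later, while all CRTCs are DPMS off. A small sketch of the chained struct assignment with a stand-in state type (field names are illustrative):

#include <assert.h>

struct cdclk_state {
        int cdclk, vco;
};

struct cdclk_bookkeeping {
        struct cdclk_state hw, logical, actual;
};

int main(void)
{
        struct cdclk_bookkeeping c = { .hw = { 540000, 8100000 } };

        c.logical = c.actual = c.hw;    /* struct assignment copies all fields */
        assert(c.actual.cdclk == 540000 && c.logical.vco == 8100000);
        return 0;
}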
@@@ -16566,7 -14886,8 +14887,8 @@@ retry
         * intermediate watermarks (since we don't trust the current
         * watermarks).
         */
-       intel_state->skip_intermediate_wm = true;
+       if (!HAS_GMCH_DISPLAY(dev_priv))
+               intel_state->skip_intermediate_wm = true;
  
        ret = intel_atomic_check(dev, state);
        if (ret) {
        drm_modeset_acquire_fini(&ctx);
  }
  
- static void intel_atomic_helper_free_state(struct work_struct *work)
- {
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
-       struct intel_atomic_state *state, *next;
-       struct llist_node *freed;
-       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
-       llist_for_each_entry_safe(state, next, freed, freed)
-               drm_atomic_state_put(&state->base);
- }
  int intel_modeset_init(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = to_i915(dev);
        dev->mode_config.funcs = &intel_mode_funcs;
  
        INIT_WORK(&dev_priv->atomic_helper.free_work,
-                 intel_atomic_helper_free_state);
+                 intel_atomic_helper_free_state_worker);
  
        intel_init_quirks(dev);
  
                }
        }
  
-       intel_update_czclk(dev_priv);
-       intel_update_cdclk(dev_priv);
-       dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
        intel_shared_dpll_init(dev);
  
+       intel_update_czclk(dev_priv);
+       intel_modeset_init_hw(dev);
        if (dev_priv->max_cdclk_freq == 0)
                intel_update_max_cdclk(dev_priv);
  
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
-       sanitize_watermarks(dev);
+       if (!HAS_GMCH_DISPLAY(dev_priv))
+               sanitize_watermarks(dev);
  
        return 0;
  }
@@@ -16844,6 -15153,7 +15154,7 @@@ static void intel_sanitize_crtc(struct 
                        if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
                                continue;
  
+                       trace_intel_disable_plane(&plane->base, crtc);
                        plane->disable_plane(&plane->base, &crtc->base);
                }
        }
@@@ -16990,15 -15300,14 +15301,14 @@@ static bool primary_get_hw_state(struc
  /* FIXME read out full plane state for all planes */
  static void readout_plane_state(struct intel_crtc *crtc)
  {
-       struct drm_plane *primary = crtc->base.primary;
-       struct intel_plane_state *plane_state =
-               to_intel_plane_state(primary->state);
+       struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+       bool visible;
  
-       plane_state->base.visible = crtc->active &&
-               primary_get_hw_state(to_intel_plane(primary));
+       visible = crtc->active && primary_get_hw_state(primary);
  
-       if (plane_state->base.visible)
-               crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
+       intel_set_plane_visible(to_intel_crtc_state(crtc->base.state),
+                               to_intel_plane_state(primary->base.state),
+                               visible);
  }
  
  static void intel_modeset_readout_hw_state(struct drm_device *dev)
                         */
                        crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
  
-                       if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
-                               pixclk = ilk_pipe_pixel_rate(crtc_state);
-                       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-                               pixclk = crtc_state->base.adjusted_mode.crtc_clock;
+                       intel_crtc_compute_pixel_rate(crtc_state);
+                       if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) ||
+                           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                               pixclk = crtc_state->pixel_rate;
                        else
                                WARN_ON(dev_priv->display.modeset_calc_cdclk);
  
        }
  }
  
+ static void
+ get_encoder_power_domains(struct drm_i915_private *dev_priv)
+ {
+       struct intel_encoder *encoder;
+
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               u64 get_domains;
+               enum intel_display_power_domain domain;
+
+               if (!encoder->get_power_domains)
+                       continue;
+
+               get_domains = encoder->get_power_domains(encoder);
+               for_each_power_domain(domain, get_domains)
+                       intel_display_power_get(dev_priv, domain);
+       }
+ }
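
encoder->get_power_domains() returns a u64 bitmask (part of this cycle's move to 64-bit power domains), and the loop above takes a reference for every set bit. The same walk in plain, runnable C; for_each_power_domain() itself is a kernel macro and is not reproduced here:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t get_domains = (1ull << 3) | (1ull << 40);      /* example mask */
        unsigned int domain;

        for (domain = 0; domain < 64; domain++)
                if (get_domains & (1ull << domain))
                        printf("grab power domain %u\n", domain);
        return 0;
}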
  /* Scan out the current hw modeset state,
   * and sanitize it to the current state
   */
@@@ -17167,6 -15495,8 +15496,8 @@@ intel_modeset_setup_hw_state(struct drm
        intel_modeset_readout_hw_state(dev);
  
        /* HW state is read out, now we need to sanitize this mess. */
+       get_encoder_power_domains(dev_priv);
        for_each_intel_encoder(dev, encoder) {
                intel_sanitize_encoder(encoder);
        }
                pll->on = false;
        }
  
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                vlv_wm_get_hw_state(dev);
-       else if (IS_GEN9(dev_priv))
+               vlv_wm_sanitize(dev_priv);
+       } else if (IS_GEN9(dev_priv)) {
                skl_wm_get_hw_state(dev);
-       else if (HAS_PCH_SPLIT(dev_priv))
+       } else if (HAS_PCH_SPLIT(dev_priv)) {
                ilk_wm_get_hw_state(dev);
+       }
  
        for_each_intel_crtc(dev, crtc) {
-               unsigned long put_domains;
+               u64 put_domains;
  
                put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
                if (WARN_ON(put_domains))
        }
        intel_display_set_init_power(dev_priv, false);
  
+       intel_power_domains_verify_state(dev_priv);
        intel_fbc_init_pipe_state(dev_priv);
  }
  
@@@ -17241,7 -15575,7 +15576,7 @@@ void intel_display_resume(struct drm_de
        }
  
        if (!ret)
 -              ret = __intel_display_resume(dev, state);
 +              ret = __intel_display_resume(dev, state, &ctx);
  
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
@@@ -17259,8 -15593,6 +15594,6 @@@ void intel_modeset_gem_init(struct drm_
  
        intel_init_gt_powersave(dev_priv);
  
-       intel_modeset_init_hw(dev);
        intel_setup_overlay(dev_priv);
  }
  
@@@ -17492,9 -15824,9 +15825,9 @@@ intel_display_capture_error_state(struc
  
  void
  intel_display_print_error_state(struct drm_i915_error_state_buf *m,
-                               struct drm_i915_private *dev_priv,
                                struct intel_display_error_state *error)
  {
+       struct drm_i915_private *dev_priv = m->i915;
        int i;
  
        if (!error)
index 064582963ff61345e61f50f7ed40932fc083e514,3ef044efe98c15548d2144c34b87d080c3993721..0f766f83a31b1c522860254e11d439474794189b
@@@ -28,7 -28,6 +28,7 @@@
  #include <linux/async.h>
  #include <linux/i2c.h>
  #include <linux/hdmi.h>
 +#include <linux/sched/clock.h>
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
  #include <drm/drm_crtc.h>
@@@ -242,6 -241,9 +242,9 @@@ struct intel_encoder 
         * be set correctly before calling this function. */
        void (*get_config)(struct intel_encoder *,
                           struct intel_crtc_state *pipe_config);
+       /* Returns a mask of power domains that need to be referenced as part
+        * of the hardware state readout code. */
+       u64 (*get_power_domains)(struct intel_encoder *encoder);
        /*
         * Called during system suspend after all pending requests for the
         * encoder are flushed (for example for DP AUX transactions) and
        void (*suspend)(struct intel_encoder *);
        int crtc_mask;
        enum hpd_pin hpd_pin;
+       enum intel_display_power_domain power_domain;
        /* for communication with audio component; protected by av_mutex */
        const struct drm_connector *audio_connector;
  };
@@@ -334,13 -337,20 +338,20 @@@ struct dpll 
  struct intel_atomic_state {
        struct drm_atomic_state base;
  
-       unsigned int cdclk;
-       /*
-        * Calculated device cdclk, can be different from cdclk
-        * only when all crtc's are DPMS off.
-        */
-       unsigned int dev_cdclk;
+       struct {
+               /*
+                * Logical state of cdclk (used for all scaling, watermark,
+                * etc. calculations and checks). This is computed as if all
+                * enabled crtcs were active.
+                */
+               struct intel_cdclk_state logical;
+               /*
+                * Actual state of cdclk; it can differ from the logical
+                * state only when all crtcs are DPMS off.
+                */
+               struct intel_cdclk_state actual;
+       } cdclk;
  
        bool dpll_set, modeset;
  
        unsigned int active_crtcs;
        unsigned int min_pixclk[I915_MAX_PIPES];
  
-       /* SKL/KBL Only */
-       unsigned int cdclk_pll_vco;
        struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
  
        /*
@@@ -485,6 -492,24 +493,24 @@@ struct skl_pipe_wm 
        uint32_t linetime;
  };
  
+ enum vlv_wm_level {
+       VLV_WM_LEVEL_PM2,
+       VLV_WM_LEVEL_PM5,
+       VLV_WM_LEVEL_DDR_DVFS,
+       NUM_VLV_WM_LEVELS,
+ };
+
+ struct vlv_wm_state {
+       struct vlv_pipe_wm wm[NUM_VLV_WM_LEVELS];
+       struct vlv_sr_wm sr[NUM_VLV_WM_LEVELS];
+       uint8_t num_levels;
+       bool cxsr;
+ };
+
+ struct vlv_fifo_state {
+       u16 plane[I915_MAX_PLANES];
+ };
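
The new vlv_wm_state is sized by its level enum: the trailing NUM_VLV_WM_LEVELS enumerator doubles as the array length, so adding a watermark level automatically grows every per-level array. A minimal illustration:

#include <stdio.h>

enum vlv_wm_level {
        VLV_WM_LEVEL_PM2,
        VLV_WM_LEVEL_PM5,
        VLV_WM_LEVEL_DDR_DVFS,
        NUM_VLV_WM_LEVELS,      /* doubles as the array size */
};

int main(void)
{
        int wm[NUM_VLV_WM_LEVELS] = { 0 };

        wm[VLV_WM_LEVEL_DDR_DVFS] = 42;
        printf("levels=%d highest=%d\n", NUM_VLV_WM_LEVELS,
               wm[NUM_VLV_WM_LEVELS - 1]);
        return 0;
}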
  struct intel_crtc_wm_state {
        union {
                struct {
                        struct skl_pipe_wm optimal;
                        struct skl_ddb_entry ddb;
                } skl;
+               struct {
+                       /* "raw" watermarks (not inverted) */
+                       struct vlv_pipe_wm raw[NUM_VLV_WM_LEVELS];
+                       /* intermediate watermarks (inverted) */
+                       struct vlv_wm_state intermediate;
+                       /* optimal watermarks (inverted) */
+                       struct vlv_wm_state optimal;
+                       /* display FIFO split */
+                       struct vlv_fifo_state fifo_state;
+               } vlv;
        };
  
        /*
@@@ -539,12 -575,19 +576,19 @@@ struct intel_crtc_state 
        bool disable_cxsr;
        bool update_wm_pre, update_wm_post; /* watermarks are updated */
        bool fb_changed; /* fb on any of the planes is changed */
+       bool fifo_changed; /* FIFO split is changed */
  
        /* Pipe source size (i.e. panel fitter input size)
         * All planes will be positioned inside this space,
         * and get clipped at the edges. */
        int pipe_src_w, pipe_src_h;
  
+       /*
+        * Pipe pixel rate, adjusted for
+        * panel fitter/pipe scaler downscaling.
+        */
+       unsigned int pixel_rate;
        /* Whether to set up the PCH/FDI. Note that we never allow sharing
         * between pch encoders and cpu encoders. */
        bool has_pch_encoder;
         */
        bool dither;
  
+       /*
+        * Dither gets enabled for 18bpp, which causes CRC mismatch errors for
+        * compliance video pattern tests.
+        * Disable dither only if it is a compliance test request for
+        * 18bpp.
+        */
+       bool dither_force_disable;
        /* Controls for the clock computation, to override various stages. */
        bool clock_set;
  
  
        /* Gamma mode programmed on the pipe */
        uint32_t gamma_mode;
- };
  
- struct vlv_wm_state {
-       struct vlv_pipe_wm wm[3];
-       struct vlv_sr_wm sr[3];
-       uint8_t num_active_planes;
-       uint8_t num_levels;
-       uint8_t level;
-       bool cxsr;
+       /* bitmask of visible planes (enum plane_id) */
+       u8 active_planes;
  };
  
  struct intel_crtc {
        bool active;
        bool lowfreq_avail;
        u8 plane_ids_mask;
-       unsigned long enabled_power_domains;
+       unsigned long long enabled_power_domains;
        struct intel_overlay *overlay;
        struct intel_flip_work *flip_work;
  
                /* watermarks currently being used  */
                union {
                        struct intel_pipe_wm ilk;
+                       struct vlv_wm_state vlv;
                } active;
-               /* allow CxSR on this pipe */
-               bool cxsr_allowed;
        } wm;
  
        int scanline_offset;
  
        /* scalers available on this crtc */
        int num_scalers;
-       struct vlv_wm_state wm_state;
- };
- struct intel_plane_wm_parameters {
-       uint32_t horiz_pixels;
-       uint32_t vert_pixels;
-       /*
-        *   For packed pixel formats:
-        *     bytes_per_pixel - holds bytes per pixel
-        *   For planar pixel formats:
-        *     bytes_per_pixel - holds bytes per pixel for uv-plane
-        *     y_bytes_per_pixel - holds bytes per pixel for y-plane
-        */
-       uint8_t bytes_per_pixel;
-       uint8_t y_bytes_per_pixel;
-       bool enabled;
-       bool scaled;
-       u64 tiling;
-       unsigned int rotation;
-       uint16_t fifo_size;
  };
  
  struct intel_plane {
        int max_downscale;
        uint32_t frontbuffer_bit;
  
-       /* Since we need to change the watermarks before/after
-        * enabling/disabling the planes, we need to store the parameters here
-        * as the other pieces of the struct may not reflect the values we want
-        * for the watermark calculations. Currently only Haswell uses this.
-        */
-       struct intel_plane_wm_parameters wm;
        /*
         * NOTE: Do not place new plane state fields here (e.g., when adding
         * new plane properties).  New runtime state should now be placed in
@@@ -891,12 -906,17 +907,17 @@@ struct intel_dp_desc 
  
  struct intel_dp_compliance_data {
        unsigned long edid;
+       uint8_t video_pattern;
+       uint16_t hdisplay, vdisplay;
+       uint8_t bpc;
  };
  
  struct intel_dp_compliance {
        unsigned long test_type;
        struct intel_dp_compliance_data test_data;
        bool test_active;
+       int test_link_rate;
+       u8 test_lane_count;
  };
  
  struct intel_dp {
        bool has_audio;
        bool detect_done;
        bool channel_eq_status;
+       bool reset_link_params;
        enum hdmi_force_audio force_audio;
        bool limited_color_range;
        bool color_range_auto;
        /* sink or branch descriptor */
        struct intel_dp_desc desc;
        struct drm_dp_aux aux;
+       enum intel_display_power_domain aux_power_domain;
        uint8_t train_set[4];
        int panel_power_up_delay;
        int panel_power_down_delay;
  struct intel_lspcon {
        bool active;
        enum drm_lspcon_mode mode;
-       bool desc_valid;
  };
  
  struct intel_digital_port {
        enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
        bool release_cl2_override;
        uint8_t max_lanes;
+       enum intel_display_power_domain ddi_io_power_domain;
  };
  
  struct intel_dp_mst_encoder {
@@@ -1097,7 -1119,19 +1120,19 @@@ intel_attached_encoder(struct drm_conne
  static inline struct intel_digital_port *
  enc_to_dig_port(struct drm_encoder *encoder)
  {
-       return container_of(encoder, struct intel_digital_port, base.base);
+       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+       switch (intel_encoder->type) {
+       case INTEL_OUTPUT_UNKNOWN:
+               WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
+       case INTEL_OUTPUT_DP:
+       case INTEL_OUTPUT_EDP:
+       case INTEL_OUTPUT_HDMI:
+               return container_of(encoder, struct intel_digital_port,
+                                   base.base);
+       default:
+               return NULL;
+       }
  }
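
Note the deliberate fallthrough above: INTEL_OUTPUT_UNKNOWN warns if the encoder is not a DDI one and then falls into the DP/eDP/HDMI cases, while any non-digital encoder type now yields NULL instead of a bogus container_of(). A runnable sketch of that control flow (the enum values here are stand-ins):

#include <stdio.h>

enum out { UNKNOWN, DP, EDP, HDMI, ANALOG };

static const char *classify(enum out type)
{
        switch (type) {
        case UNKNOWN:
                /* the real code warns here, then falls through */
        case DP:
        case EDP:
        case HDMI:
                return "digital port";
        default:
                return "not a digital port";
        }
}

int main(void)
{
        printf("%s\n", classify(UNKNOWN));      /* digital port */
        printf("%s\n", classify(ANALOG));       /* not a digital port */
        return 0;
}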
  
  static inline struct intel_dp_mst_encoder *
@@@ -1185,18 -1219,19 +1220,19 @@@ void intel_ddi_fdi_post_disable(struct 
                                struct intel_crtc_state *old_crtc_state,
                                struct drm_connector_state *old_conn_state);
  void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
- void hsw_fdi_link_train(struct drm_crtc *crtc);
+ void hsw_fdi_link_train(struct intel_crtc *crtc,
+                       const struct intel_crtc_state *crtc_state);
  void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
  enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
  bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
- void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
  void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
                                       enum transcoder cpu_transcoder);
- void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
- void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
  bool intel_ddi_pll_select(struct intel_crtc *crtc,
                          struct intel_crtc_state *crtc_state);
- void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
  void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
  bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
  bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
@@@ -1209,11 -1244,12 +1245,12 @@@ intel_ddi_get_crtc_new_encoder(struct i
  void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
  void intel_ddi_clock_get(struct intel_encoder *encoder,
                         struct intel_crtc_state *pipe_config);
- void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
+ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
+                                   bool state);
  uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
- struct intel_shared_dpll *intel_ddi_get_link_dpll(struct intel_dp *intel_dp,
-                                                 int clock);
- unsigned int intel_fb_align_height(struct drm_device *dev,
+ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+ unsigned int intel_fb_align_height(struct drm_i915_private *dev_priv,
                                   unsigned int height,
                                   uint32_t pixel_format,
                                   uint64_t fb_format_modifier);
@@@ -1228,15 -1264,25 +1265,27 @@@ void intel_audio_codec_enable(struct in
  void intel_audio_codec_disable(struct intel_encoder *encoder);
  void i915_audio_component_init(struct drm_i915_private *dev_priv);
  void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
 +void intel_audio_init(struct drm_i915_private *dev_priv);
 +void intel_audio_deinit(struct drm_i915_private *dev_priv);
  
+ /* intel_cdclk.c */
+ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
+ void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
+ void intel_update_cdclk(struct drm_i915_private *dev_priv);
+ void intel_update_rawclk(struct drm_i915_private *dev_priv);
+ bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
+                              const struct intel_cdclk_state *b);
+ void intel_set_cdclk(struct drm_i915_private *dev_priv,
+                    const struct intel_cdclk_state *cdclk_state);
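
These declarations are the entry points of the new intel_cdclk.c. A hedged sketch of how they are meant to compose, recompute, compare, and only touch the hardware on change, using a stand-in state struct and a memcmp-style comparison (the real intel_cdclk_state layout is not part of this diff):

#include <stdio.h>
#include <string.h>

struct cdclk_state {
        int cdclk, vco, ref;
};

/* analogous to intel_cdclk_state_compare(): true when nothing changed */
static int cdclk_state_equal(const struct cdclk_state *a,
                             const struct cdclk_state *b)
{
        return memcmp(a, b, sizeof(*a)) == 0;
}

int main(void)
{
        struct cdclk_state cur = { 337500, 8100000, 19200 };
        struct cdclk_state want = cur;

        want.cdclk = 675000;
        if (!cdclk_state_equal(&cur, &want))
                printf("reprogram cdclk to %d kHz\n", want.cdclk);
        return 0;
}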
  /* intel_display.c */
  enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
- void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
  void intel_update_rawclk(struct drm_i915_private *dev_priv);
+ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
  int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
                      const char *name, u32 reg, int ref_freq);
+ int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+                          const char *name, u32 reg);
  void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
  void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
  extern const struct drm_plane_funcs intel_plane_funcs;
@@@ -1311,9 -1357,8 +1360,8 @@@ struct i915_vma 
  intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
  void intel_unpin_fb_vma(struct i915_vma *vma);
  struct drm_framebuffer *
- __intel_framebuffer_create(struct drm_device *dev,
-                          struct drm_mode_fb_cmd2 *mode_cmd,
-                          struct drm_i915_gem_object *obj);
+ intel_framebuffer_create(struct drm_i915_gem_object *obj,
+                        struct drm_mode_fb_cmd2 *mode_cmd);
  void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
  void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
  void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
@@@ -1388,10 -1433,7 +1436,7 @@@ int chv_calc_dpll_params(int refclk, st
  bool intel_crtc_active(struct intel_crtc *crtc);
  void hsw_enable_ips(struct intel_crtc *crtc);
  void hsw_disable_ips(struct intel_crtc *crtc);
- enum intel_display_power_domain
- intel_display_port_power_domain(struct intel_encoder *intel_encoder);
- enum intel_display_power_domain
- intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
+ enum intel_display_power_domain intel_port_to_power_domain(enum port port);
  void intel_mode_from_pipe_config(struct drm_display_mode *mode,
                                 struct intel_crtc_state *pipe_config);
  
@@@ -1664,6 -1706,7 +1709,7 @@@ int intel_power_domains_init(struct drm
  void intel_power_domains_fini(struct drm_i915_private *);
  void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
  void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
+ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
  void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
  void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
  void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
@@@ -1692,10 -1735,8 +1738,8 @@@ static inline voi
  assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
  {
        assert_rpm_device_not_suspended(dev_priv);
-       /* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
-        * too much noise. */
-       if (!atomic_read(&dev_priv->pm.wakeref_count))
-               DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
+       WARN_ONCE(!atomic_read(&dev_priv->pm.wakeref_count),
+                 "RPM wakelock ref not held during HW access");
  }
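
The hunk above converts the debug message back into WARN_ONCE(), so a missing RPM wakelock reference now complains once per boot instead of flooding the log. A user-space sketch of the once-only semantics (warn_once here is illustrative, not the kernel macro):

#include <stdio.h>

#define warn_once(cond, msg) do {                               \
        static int warned;                                      \
        if ((cond) && !warned) {                                \
                warned = 1;                                     \
                fprintf(stderr, "WARNING: %s\n", msg);          \
        }                                                       \
} while (0)

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)
                warn_once(1, "RPM wakelock ref not held during HW access");
        return 0;       /* the warning printed exactly once */
}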
  
  /**
@@@ -1783,6 -1824,7 +1827,7 @@@ void skl_ddb_get_hw_state(struct drm_i9
                          struct skl_ddb_allocation *ddb /* out */);
  void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
                              struct skl_pipe_wm *out);
+ void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
  bool intel_can_enable_sagv(struct drm_atomic_state *state);
  int intel_enable_sagv(struct drm_i915_private *dev_priv);
  int intel_disable_sagv(struct drm_i915_private *dev_priv);
@@@ -1791,7 -1833,6 +1836,6 @@@ bool skl_wm_level_equals(const struct s
  bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
                                 const struct skl_ddb_entry *ddb,
                                 int ignore);
- uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
  bool ilk_disable_lp_wm(struct drm_device *dev);
  int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
  static inline int intel_enable_rc6(void)
@@@ -1865,9 -1906,9 +1909,9 @@@ intel_atomic_get_existing_plane_state(s
        return to_intel_plane_state(plane_state);
  }
  
- int intel_atomic_setup_scalers(struct drm_device *dev,
-       struct intel_crtc *intel_crtc,
-       struct intel_crtc_state *crtc_state);
+ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+                              struct intel_crtc *intel_crtc,
+                              struct intel_crtc_state *crtc_state);
  
  /* intel_atomic_plane.c */
  struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
@@@ -1891,6 -1932,7 +1935,6 @@@ void lspcon_wait_pcon_mode(struct intel
  
  /* intel_pipe_crc.c */
  int intel_pipe_crc_create(struct drm_minor *minor);
 -void intel_pipe_crc_cleanup(struct drm_minor *minor);
  #ifdef CONFIG_DEBUG_FS
  int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
                              size_t *values_cnt);
index 281c5c48a84d96a16998a79b0d2e1deff9a2194c,82993a87654c28ec6be0883698d1a8f67a5abac2..f7e9a4e6959534f867f4b796218e038e7e759760
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
  
+ static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+ {
+       struct drm_i915_gem_object *obj = ifbdev->fb->obj;
+       unsigned int origin = ifbdev->vma->fence ? ORIGIN_GTT : ORIGIN_CPU;
+
+       intel_fb_obj_invalidate(obj, origin);
+ }
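
intel_fbdev_invalidate() deduplicates three identical open-coded invalidate sequences below and drops the struct_mutex dance around them; the frontbuffer origin now depends on whether the pinned vma carries a fence. A tiny sketch of that selection with stand-in types:

#include <stdio.h>

enum origin { ORIGIN_GTT, ORIGIN_CPU };

struct vma {
        void *fence;    /* non-NULL when a fence register is attached */
};

static enum origin fbdev_origin(const struct vma *vma)
{
        return vma->fence ? ORIGIN_GTT : ORIGIN_CPU;
}

int main(void)
{
        struct vma fenced = { (void *)1 }, unfenced = { NULL };

        printf("fenced=%d unfenced=%d\n",
               fbdev_origin(&fenced), fbdev_origin(&unfenced));
        return 0;
}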
  static int intel_fbdev_set_par(struct fb_info *info)
  {
        struct drm_fb_helper *fb_helper = info->par;
        int ret;
  
        ret = drm_fb_helper_set_par(info);
-       if (ret == 0) {
-               mutex_lock(&fb_helper->dev->struct_mutex);
-               intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
-               mutex_unlock(&fb_helper->dev->struct_mutex);
-       }
+       if (ret == 0)
+               intel_fbdev_invalidate(ifbdev);
  
        return ret;
  }
@@@ -71,12 -75,8 +75,8 @@@ static int intel_fbdev_blank(int blank
        int ret;
  
        ret = drm_fb_helper_blank(blank, info);
-       if (ret == 0) {
-               mutex_lock(&fb_helper->dev->struct_mutex);
-               intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
-               mutex_unlock(&fb_helper->dev->struct_mutex);
-       }
+       if (ret == 0)
+               intel_fbdev_invalidate(ifbdev);
  
        return ret;
  }
@@@ -87,15 -87,11 +87,11 @@@ static int intel_fbdev_pan_display(stru
        struct drm_fb_helper *fb_helper = info->par;
        struct intel_fbdev *ifbdev =
                container_of(fb_helper, struct intel_fbdev, helper);
        int ret;
-       ret = drm_fb_helper_pan_display(var, info);
  
-       if (ret == 0) {
-               mutex_lock(&fb_helper->dev->struct_mutex);
-               intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
-               mutex_unlock(&fb_helper->dev->struct_mutex);
-       }
+       ret = drm_fb_helper_pan_display(var, info);
+       if (ret == 0)
+               intel_fbdev_invalidate(ifbdev);
  
        return ret;
  }
@@@ -121,7 -117,7 +117,7 @@@ static int intelfb_alloc(struct drm_fb_
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_mode_fb_cmd2 mode_cmd = {};
-       struct drm_i915_gem_object *obj = NULL;
+       struct drm_i915_gem_object *obj;
        int size, ret;
  
        /* we don't do packed 24bpp */
        mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
                                                          sizes->surface_depth);
  
-       mutex_lock(&dev->struct_mutex);
        size = mode_cmd.pitches[0] * mode_cmd.height;
        size = PAGE_ALIGN(size);
  
        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
+       obj = NULL;
        if (size * 2 < ggtt->stolen_usable_size)
                obj = i915_gem_object_create_stolen(dev_priv, size);
        if (obj == NULL)
        if (IS_ERR(obj)) {
                DRM_ERROR("failed to allocate framebuffer\n");
                ret = PTR_ERR(obj);
-               goto out;
+               goto err;
        }
  
-       fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
+       fb = intel_framebuffer_create(obj, &mode_cmd);
        if (IS_ERR(fb)) {
-               i915_gem_object_put(obj);
                ret = PTR_ERR(fb);
-               goto out;
+               goto err_obj;
        }
  
-       mutex_unlock(&dev->struct_mutex);
        ifbdev->fb = to_intel_framebuffer(fb);
  
        return 0;
  
- out:
-       mutex_unlock(&dev->struct_mutex);
+ err_obj:
+       i915_gem_object_put(obj);
+ err:
        return ret;
  }
  
@@@ -253,7 -246,7 +246,7 @@@ static int intelfb_create(struct drm_fb
        if (IS_ERR(vaddr)) {
                DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
                ret = PTR_ERR(vaddr);
 -              goto out_destroy_fbi;
 +              goto out_unpin;
        }
        info->screen_base = vaddr;
        info->screen_size = vma->node.size;
        vga_switcheroo_client_fb_set(pdev, info);
        return 0;
  
 -out_destroy_fbi:
 -      drm_fb_helper_release_fbi(helper);
  out_unpin:
        intel_unpin_fb_vma(vma);
  out_unlock:
@@@ -355,23 -350,23 +348,23 @@@ static bool intel_fb_initial_config(str
                                    bool *enabled, int width, int height)
  {
        struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
-       unsigned long conn_configured, mask;
+       unsigned long conn_configured, conn_seq, mask;
        unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
        int i, j;
        bool *save_enabled;
        bool fallback = true;
        int num_connectors_enabled = 0;
        int num_connectors_detected = 0;
-       int pass = 0;
  
        save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
        if (!save_enabled)
                return false;
  
        memcpy(save_enabled, enabled, count);
-       mask = BIT(count) - 1;
+       mask = GENMASK(count - 1, 0);
        conn_configured = 0;
  retry:
+       conn_seq = conn_configured;
        for (i = 0; i < count; i++) {
                struct drm_fb_helper_connector *fb_conn;
                struct drm_connector *connector;
                if (conn_configured & BIT(i))
                        continue;
  
-               if (pass == 0 && !connector->has_tile)
+               if (conn_seq == 0 && !connector->has_tile)
                        continue;
  
                if (connector->status == connector_status_connected)
                conn_configured |= BIT(i);
        }
  
-       if ((conn_configured & mask) != mask) {
-               pass++;
+       if ((conn_configured & mask) != mask && conn_configured != conn_seq)
                goto retry;
-       }
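
The retry logic above replaces the pass counter with a progress check: conn_seq snapshots the configured mask at the top of each pass, and the loop only retries while a pass actually configured something new, which guarantees termination. (The companion switch from BIT(count) - 1 to GENMASK(count - 1, 0) also avoids the undefined shift when count equals BITS_PER_LONG.) A runnable sketch of the progress-based retry, with a toy "configure one more connector per pass" step:

#include <stdio.h>

int main(void)
{
        unsigned long mask = 0xf, conn_configured = 0, conn_seq;
        int passes = 0;

retry:
        conn_seq = conn_configured;
        /* toy stand-in for the per-connector loop: make some progress */
        conn_configured |= conn_configured ? conn_configured << 1 : 1;
        passes++;
        if ((conn_configured & mask) != mask && conn_configured != conn_seq)
                goto retry;

        printf("settled after %d passes, mask=%#lx\n",
               passes, conn_configured & mask);
        return 0;
}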
  
        /*
         * If the BIOS didn't enable everything it could, fall back to have the
@@@ -541,6 -534,7 +532,6 @@@ static void intel_fbdev_destroy(struct 
         */
  
        drm_fb_helper_unregister_fbi(&ifbdev->helper);
 -      drm_fb_helper_release_fbi(&ifbdev->helper);
  
        drm_fb_helper_fini(&ifbdev->helper);
  
@@@ -628,7 -622,7 +619,7 @@@ static bool intel_fbdev_init_bios(struc
                }
  
                cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
-               cur_size = intel_fb_align_height(dev, cur_size,
+               cur_size = intel_fb_align_height(to_i915(dev), cur_size,
                                                 fb->base.format->format,
                                                 fb->base.modifier);
                cur_size *= fb->base.pitches[0];
@@@ -838,11 -832,6 +829,6 @@@ void intel_fbdev_restore_mode(struct dr
        if (!ifbdev->fb)
                return;
  
-       if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) {
-               DRM_DEBUG("failed to restore crtc mode\n");
-       } else {
-               mutex_lock(&dev->struct_mutex);
-               intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
-               mutex_unlock(&dev->struct_mutex);
-       }
+       if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
+               intel_fbdev_invalidate(ifbdev);
  }
index ebae2bd839189c07588e88a526f3f804d08157b3,048f76d329bb7723c28918a79e4582d122f97e63..c2184f755ec6e62f6b9c0971da0cf94569f05366
@@@ -36,7 -36,6 +36,7 @@@
  #include <drm/drm_edid.h>
  #include "intel_drv.h"
  #include <drm/i915_drm.h>
 +#include <drm/intel_lpe_audio.h>
  #include "i915_drv.h"
  
  static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
@@@ -902,12 -901,11 +902,11 @@@ static bool intel_hdmi_get_hw_state(str
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-       enum intel_display_power_domain power_domain;
        u32 tmp;
        bool ret;
  
-       power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       if (!intel_display_power_get_if_enabled(dev_priv,
+                                               encoder->power_domain))
                return false;
  
        ret = false;
        ret = true;
  
  out:
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, encoder->power_domain);
  
        return ret;
  }
@@@ -1869,14 -1867,7 +1868,7 @@@ void intel_hdmi_init_connector(struct i
  
        switch (port) {
        case PORT_B:
-               /*
-                * On BXT A0/A1, sw needs to activate DDIA HPD logic and
-                * interrupts to check the external panel connection.
-                */
-               if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
-                       intel_encoder->hpd_pin = HPD_PORT_A;
-               else
-                       intel_encoder->hpd_pin = HPD_PORT_B;
+               intel_encoder->hpd_pin = HPD_PORT_B;
                break;
        case PORT_C:
                intel_encoder->hpd_pin = HPD_PORT_C;
@@@ -1988,6 -1979,7 +1980,7 @@@ void intel_hdmi_init(struct drm_i915_pr
        }
  
        intel_encoder->type = INTEL_OUTPUT_HDMI;
+       intel_encoder->power_domain = intel_port_to_power_domain(port);
        intel_encoder->port = port;
        if (IS_CHERRYVIEW(dev_priv)) {
                if (port == PORT_D)
index 5aa524e32df776970445ef5258ba5ca4628928d7,c782b7878288d21a026a1a60f793c06d65fb8f21..9fd9c70baeed826fb2088843cc53053a0ac9541a
@@@ -36,6 -36,31 +36,6 @@@ struct pipe_crc_info 
        enum pipe pipe;
  };
  
 -/* As the drm_debugfs_init() routines are called before dev->dev_private is
 - * allocated we need to hook into the minor for release.
 - */
 -static int drm_add_fake_info_node(struct drm_minor *minor,
 -                                struct dentry *ent, const void *key)
 -{
 -      struct drm_info_node *node;
 -
 -      node = kmalloc(sizeof(*node), GFP_KERNEL);
 -      if (node == NULL) {
 -              debugfs_remove(ent);
 -              return -ENOMEM;
 -      }
 -
 -      node->minor = minor;
 -      node->dent = ent;
 -      node->info_ent = (void *) key;
 -
 -      mutex_lock(&minor->debugfs_lock);
 -      list_add(&node->list, &minor->debugfs_list);
 -      mutex_unlock(&minor->debugfs_lock);
 -
 -      return 0;
 -}
 -
  static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
  {
        struct pipe_crc_info *info = inode->i_private;
@@@ -80,7 -105,7 +80,7 @@@ static int i915_pipe_crc_release(struc
  
  static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
  {
-       assert_spin_locked(&pipe_crc->lock);
+       lockdep_assert_held(&pipe_crc->lock);
        return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
                        INTEL_PIPE_CRC_ENTRIES_NR);
  }
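
CIRC_CNT() computes the number of occupied entries in a power-of-two ring by masking head - tail to the ring size, and lockdep_assert_held() (the other change here) additionally checks that the current task owns the lock, not merely that the lock happens to be taken. A runnable sketch of the count macro; RING_SIZE is an example value:

#include <stdio.h>

#define RING_SIZE 128   /* must be a power of two */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
        unsigned int head = 5, tail = 130;      /* head has wrapped */

        printf("%u entries\n", CIRC_CNT(head, tail, RING_SIZE));   /* 3 */
        return 0;
}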
@@@ -184,6 -209,22 +184,6 @@@ static struct pipe_crc_info i915_pipe_c
        },
  };
  
 -static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
 -                              enum pipe pipe)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(minor->dev);
 -      struct dentry *ent;
 -      struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
 -
 -      info->dev_priv = dev_priv;
 -      ent = debugfs_create_file(info->name, S_IRUGO, root, info,
 -                                &i915_pipe_crc_fops);
 -      if (!ent)
 -              return -ENOMEM;
 -
 -      return drm_add_fake_info_node(minor, ent, info);
 -}
 -
  static const char * const pipe_crc_sources[] = {
        "none",
        "plane1",
@@@ -887,22 -928,27 +887,22 @@@ void intel_display_crc_init(struct drm_
  
  int intel_pipe_crc_create(struct drm_minor *minor)
  {
 -      int ret, i;
 -
 -      for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
 -              ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
 -              if (ret)
 -                      return ret;
 -      }
 -
 -      return 0;
 -}
 -
 -void intel_pipe_crc_cleanup(struct drm_minor *minor)
 -{
 +      struct drm_i915_private *dev_priv = to_i915(minor->dev);
 +      struct dentry *ent;
        int i;
  
        for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
 -              struct drm_info_list *info_list =
 -                      (struct drm_info_list *)&i915_pipe_crc_data[i];
 +              struct pipe_crc_info *info = &i915_pipe_crc_data[i];
  
 -              drm_debugfs_remove_files(info_list, 1, minor);
 +              info->dev_priv = dev_priv;
 +              ent = debugfs_create_file(info->name, S_IRUGO,
 +                                        minor->debugfs_root, info,
 +                                        &i915_pipe_crc_fops);
 +              if (!ent)
 +                      return -ENOMEM;
        }
 +
 +      return 0;
  }
  
  int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,