]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge remote-tracking branch 'airlied/drm-prime-vmap' into drm-intel-next-queued
authorDaniel Vetter <daniel.vetter@ffwll.ch>
Fri, 1 Jun 2012 08:49:16 +0000 (10:49 +0200)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Fri, 1 Jun 2012 08:52:54 +0000 (10:52 +0200)
We need the latest dma-buf code from Dave Airlie so that we can pimp
the backing storage handling code in drm/i915 with Chris Wilson's
unbound tracking and stolen mem backed gem object code.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
1  2 
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_sdvo.c

index 470c73219e6b1c0f13012c7d37bf9b6d22b2f9f4,c9cfc67c2cf58acdf7871a6e81fda66d3c45dedb..ccabadd2b6c39ac6b613f394ef70e1ffa1a493f5
@@@ -656,8 -656,6 +656,8 @@@ typedef struct drm_i915_private 
                /** PPGTT used for aliasing the PPGTT with the GTT */
                struct i915_hw_ppgtt *aliasing_ppgtt;
  
 +              u32 *l3_remap_info;
 +
                struct shrinker inactive_shrinker;
  
                /**
  
        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;
 +
 +      struct work_struct parity_error_work;
  } drm_i915_private_t;
  
  /* Iterate over initialised rings */
@@@ -944,6 -940,11 +944,11 @@@ struct drm_i915_gem_object 
        struct scatterlist *sg_list;
        int num_sg;
  
+       /* prime dma-buf support */
+       struct sg_table *sg_table;
+       void *dma_buf_vmapping;
+       int vmapping_count;
        /**
         * Used for performing relocations during execbuffer insertion.
         */
@@@ -1233,8 -1234,6 +1238,8 @@@ int i915_gem_get_tiling(struct drm_devi
                        struct drm_file *file_priv);
  int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 +int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 +                      struct drm_file *file_priv);
  void i915_gem_load(struct drm_device *dev);
  int i915_gem_init_object(struct drm_gem_object *obj);
  int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
@@@ -1251,6 -1250,8 +1256,8 @@@ int __must_check i915_gem_object_unbind
  void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
  void i915_gem_lastclose(struct drm_device *dev);
  
+ int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+                                 gfp_t gfpmask);
  int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
  int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
  int i915_gem_object_sync(struct drm_i915_gem_object *obj,
@@@ -1311,7 -1312,6 +1318,7 @@@ int __must_check i915_gem_object_set_do
  int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
  int __must_check i915_gem_init(struct drm_device *dev);
  int __must_check i915_gem_init_hw(struct drm_device *dev);
 +void i915_gem_l3_remap(struct drm_device *dev);
  void i915_gem_init_swizzling(struct drm_device *dev);
  void i915_gem_init_ppgtt(struct drm_device *dev);
  void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
@@@ -1320,8 -1320,8 +1327,8 @@@ int __must_check i915_gem_idle(struct d
  int __must_check i915_add_request(struct intel_ring_buffer *ring,
                                  struct drm_file *file,
                                  struct drm_i915_gem_request *request);
 -int __must_check i915_wait_request(struct intel_ring_buffer *ring,
 -                                 uint32_t seqno);
 +int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
 +                               uint32_t seqno);
  int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
  int __must_check
  i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@@ -1349,6 -1349,13 +1356,13 @@@ i915_gem_get_unfenced_gtt_alignment(str
  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level);
  
+ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+                               struct dma_buf *dma_buf);
+ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+                               struct drm_gem_object *gem_obj, int flags);
  /* i915_gem_gtt.c */
  int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
  void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
index 1c08e0900eff2372ae03288f9e27b16831aa9bc9,288d7b8f49ae48858a30c6ad1f9f7ce1d6d6e600..a20ac438b8ef3945b791e82bebc32b6013dc59eb
@@@ -35,6 -35,7 +35,7 @@@
  #include <linux/slab.h>
  #include <linux/swap.h>
  #include <linux/pci.h>
+ #include <linux/dma-buf.h>
  
  static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
  static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
@@@ -538,6 -539,14 +539,14 @@@ i915_gem_pread_ioctl(struct drm_device 
                goto out;
        }
  
+       /* prime objects have no backing filp to GEM pread/pwrite
+        * pages from.
+        */
+       if (!obj->base.filp) {
+               ret = -EINVAL;
+               goto out;
+       }
        trace_i915_gem_object_pread(obj, args->offset, args->size);
  
        ret = i915_gem_shmem_pread(dev, obj, args, file);
@@@ -880,6 -889,14 +889,14 @@@ i915_gem_pwrite_ioctl(struct drm_devic
                goto out;
        }
  
+       /* prime objects have no backing filp to GEM pread/pwrite
+        * pages from.
+        */
+       if (!obj->base.filp) {
+               ret = -EINVAL;
+               goto out;
+       }
        trace_i915_gem_object_pwrite(obj, args->offset, args->size);
  
        ret = -EFAULT;
@@@ -1021,6 -1038,14 +1038,14 @@@ i915_gem_mmap_ioctl(struct drm_device *
        if (obj == NULL)
                return -ENOENT;
  
+       /* prime objects have no backing filp to GEM mmap
+        * pages from.
+        */
+       if (!obj->filp) {
+               drm_gem_object_unreference_unlocked(obj);
+               return -EINVAL;
+       }
        addr = vm_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
@@@ -1302,8 -1327,7 +1327,7 @@@ i915_gem_mmap_gtt_ioctl(struct drm_devi
        return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
  }
  
- static int
+ int
  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
                              gfp_t gfpmask)
  {
        struct inode *inode;
        struct page *page;
  
+       if (obj->pages || obj->sg_table)
+               return 0;
        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         */
@@@ -1353,6 -1380,9 +1380,9 @@@ i915_gem_object_put_pages_gtt(struct dr
        int page_count = obj->base.size / PAGE_SIZE;
        int i;
  
+       if (!obj->pages)
+               return;
        BUG_ON(obj->madv == __I915_MADV_PURGED);
  
        if (i915_gem_object_needs_bit17_swizzle(obj))
@@@ -1869,82 -1899,34 +1899,82 @@@ i915_gem_check_olr(struct intel_ring_bu
        return ret;
  }
  
 +/**
 + * __wait_seqno - wait until execution of seqno has finished
 + * @ring: the ring expected to report seqno
 + * @seqno: duh!
 + * @interruptible: do an interruptible wait (normally yes)
 + * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 + *
 + * Returns 0 if the seqno was found within the allotted time. Else returns the
 + * errno with remaining time filled in timeout argument.
 + */
  static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 -                      bool interruptible)
 +                      bool interruptible, struct timespec *timeout)
  {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
 -      int ret = 0;
 +      struct timespec before, now, wait_time={1,0};
 +      unsigned long timeout_jiffies;
 +      long end;
 +      bool wait_forever = true;
  
        if (i915_seqno_passed(ring->get_seqno(ring), seqno))
                return 0;
  
        trace_i915_gem_request_wait_begin(ring, seqno);
 +
 +      if (timeout != NULL) {
 +              wait_time = *timeout;
 +              wait_forever = false;
 +      }
 +
 +      timeout_jiffies = timespec_to_jiffies(&wait_time);
 +
        if (WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;
  
 +      /* Record current time in case interrupted by signal, or wedged */
 +      getrawmonotonic(&before);
 +
  #define EXIT_COND \
        (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
        atomic_read(&dev_priv->mm.wedged))
 +      do {
 +              if (interruptible)
 +                      end = wait_event_interruptible_timeout(ring->irq_queue,
 +                                                             EXIT_COND,
 +                                                             timeout_jiffies);
 +              else
 +                      end = wait_event_timeout(ring->irq_queue, EXIT_COND,
 +                                               timeout_jiffies);
  
 -      if (interruptible)
 -              ret = wait_event_interruptible(ring->irq_queue,
 -                                             EXIT_COND);
 -      else
 -              wait_event(ring->irq_queue, EXIT_COND);
 +              if (atomic_read(&dev_priv->mm.wedged))
 +                      end = -EAGAIN;
 +      } while (end == 0 && wait_forever);
 +
 +      getrawmonotonic(&now);
  
        ring->irq_put(ring);
        trace_i915_gem_request_wait_end(ring, seqno);
  #undef EXIT_COND
  
 -      return ret;
 +      if (timeout) {
 +              struct timespec sleep_time = timespec_sub(now, before);
 +              *timeout = timespec_sub(*timeout, sleep_time);
 +      }
 +
 +      switch (end) {
 +      case -EAGAIN: /* Wedged */
 +      case -ERESTARTSYS: /* Signal */
 +              return (int)end;
 +      case 0: /* Timeout */
 +              if (timeout)
 +                      set_normalized_timespec(timeout, 0, 0);
 +              return -ETIME;
 +      default: /* Completed */
 +              WARN_ON(end < 0); /* We're not aware of other errors */
 +              return 0;
 +      }
  }
  
  /**
   * request and object lists appropriately for that event.
   */
  int
 -i915_wait_request(struct intel_ring_buffer *ring,
 -                uint32_t seqno)
 +i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
  {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        int ret = 0;
        if (ret)
                return ret;
  
 -      ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
 -      if (atomic_read(&dev_priv->mm.wedged))
 -              ret = -EAGAIN;
 +      ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
  
        return ret;
  }
@@@ -1990,7 -1975,7 +2020,7 @@@ i915_gem_object_wait_rendering(struct d
         * it.
         */
        if (obj->active) {
 -              ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
 +              ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
                if (ret)
                        return ret;
                i915_gem_retire_requests_ring(obj->ring);
        return 0;
  }
  
 +/**
 + * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 + * @DRM_IOCTL_ARGS: standard ioctl arguments
 + *
 + * Returns 0 if successful, else an error is returned with the remaining time in
 + * the timeout parameter.
 + *  -ETIME: object is still busy after timeout
 + *  -ERESTARTSYS: signal interrupted the wait
 +  *  -ENOENT: object doesn't exist
 + * Also possible, but rare:
 + *  -EAGAIN: GPU wedged
 + *  -ENOMEM: damn
 + *  -ENODEV: Internal IRQ fail
 + *  -E?: The add request failed
 + *
 + * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 + * non-zero timeout parameter the wait ioctl will wait for the given number of
 + * nanoseconds on an object becoming unbusy. Since the wait itself does so
 + * without holding struct_mutex the object may become re-busied before this
 + * function completes. A similar but shorter race condition exists in the busy
 + * ioctl
 + */
 +int
 +i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 +{
 +      struct drm_i915_gem_wait *args = data;
 +      struct drm_i915_gem_object *obj;
 +      struct intel_ring_buffer *ring = NULL;
 +      struct timespec timeout;
 +      u32 seqno = 0;
 +      int ret = 0;
 +
 +      timeout = ns_to_timespec(args->timeout_ns);
 +
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret)
 +              return ret;
 +
 +      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
 +      if (&obj->base == NULL) {
 +              mutex_unlock(&dev->struct_mutex);
 +              return -ENOENT;
 +      }
 +
 +      /* Need to make sure the object is flushed first. This non-obvious
 +       * flush is required to enforce that (active && !olr) == no wait
 +       * necessary.
 +       */
 +      ret = i915_gem_object_flush_gpu_write_domain(obj);
 +      if (ret)
 +              goto out;
 +
 +      if (obj->active) {
 +              seqno = obj->last_rendering_seqno;
 +              ring = obj->ring;
 +      }
 +
 +      if (seqno == 0)
 +               goto out;
 +
 +      ret = i915_gem_check_olr(ring, seqno);
 +      if (ret)
 +              goto out;
 +
 +      /* Do this after OLR check to make sure we make forward progress polling
 +       * on this IOCTL with a 0 timeout (like busy ioctl)
 +       */
 +      if (!args->timeout_ns) {
 +              ret = -ETIME;
 +              goto out;
 +      }
 +
 +      drm_gem_object_unreference(&obj->base);
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      ret = __wait_seqno(ring, seqno, true, &timeout);
 +      WARN_ON(!timespec_valid(&timeout));
 +      args->timeout_ns = timespec_to_ns(&timeout);
 +      return ret;
 +
 +out:
 +      drm_gem_object_unreference(&obj->base);
 +      mutex_unlock(&dev->struct_mutex);
 +      return ret;
 +}
 +
  /**
   * i915_gem_object_sync - sync an object to a ring.
   *
@@@ -2164,10 -2063,8 +2194,8 @@@ i915_gem_object_unbind(struct drm_i915_
        if (obj->gtt_space == NULL)
                return 0;
  
-       if (obj->pin_count != 0) {
-               DRM_ERROR("Attempting to unbind pinned buffer\n");
-               return -EINVAL;
-       }
+       if (obj->pin_count)
+               return -EBUSY;
  
        ret = i915_gem_object_finish_gpu(obj);
        if (ret)
@@@ -2263,7 -2160,7 +2291,7 @@@ static int i915_ring_idle(struct intel_
                        return ret;
        }
  
 -      return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 +      return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
  }
  
  int i915_gpu_idle(struct drm_device *dev)
@@@ -2467,7 -2364,7 +2495,7 @@@ i915_gem_object_flush_fence(struct drm_
        }
  
        if (obj->last_fenced_seqno) {
 -              ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
 +              ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
                if (ret)
                        return ret;
  
@@@ -3133,7 -3030,7 +3161,7 @@@ i915_gem_ring_throttle(struct drm_devic
        if (seqno == 0)
                return 0;
  
 -      ret = __wait_seqno(ring, seqno, true);
 +      ret = __wait_seqno(ring, seqno, true, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
  
@@@ -3394,6 -3291,7 +3422,7 @@@ struct drm_i915_gem_object *i915_gem_al
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
+       u32 mask;
  
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
                return NULL;
        }
  
+       mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+       if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+               /* 965gm cannot relocate objects above 4GiB. */
+               mask &= ~__GFP_HIGHMEM;
+               mask |= __GFP_DMA32;
+       }
        mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-       mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+       mapping_set_gfp_mask(mapping, mask);
  
        i915_gem_info_add_obj(dev_priv, size);
  
@@@ -3458,6 -3363,9 +3494,9 @@@ void i915_gem_free_object(struct drm_ge
  
        trace_i915_gem_object_destroy(obj);
  
+       if (gem_obj->import_attach)
+               drm_prime_gem_destroy(gem_obj, obj->sg_table);
        if (obj->phys_obj)
                i915_gem_detach_phys_object(dev, obj);
  
@@@ -3527,38 -3435,6 +3566,38 @@@ i915_gem_idle(struct drm_device *dev
        return 0;
  }
  
 +void i915_gem_l3_remap(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      u32 misccpctl;
 +      int i;
 +
 +      if (!IS_IVYBRIDGE(dev))
 +              return;
 +
 +      if (!dev_priv->mm.l3_remap_info)
 +              return;
 +
 +      misccpctl = I915_READ(GEN7_MISCCPCTL);
 +      I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 +      POSTING_READ(GEN7_MISCCPCTL);
 +
 +      for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
 +              u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
 +              if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
 +                      DRM_DEBUG("0x%x was already programmed to %x\n",
 +                                GEN7_L3LOG_BASE + i, remap);
 +              if (remap && !dev_priv->mm.l3_remap_info[i/4])
 +                      DRM_DEBUG_DRIVER("Clearing remapped register\n");
 +              I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
 +      }
 +
 +      /* Make sure all the writes land before disabling dop clock gating */
 +      POSTING_READ(GEN7_L3LOG_BASE);
 +
 +      I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 +}
 +
  void i915_gem_init_swizzling(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@@ -3648,8 -3524,6 +3687,8 @@@ i915_gem_init_hw(struct drm_device *dev
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
  
 +      i915_gem_l3_remap(dev);
 +
        i915_gem_init_swizzling(dev);
  
        ret = intel_init_render_ring_buffer(dev);
index 6553dcc2ca7934eee4ecad01972d6874a732e48c,1417660a93ec00a0a8a24cc797acc7b7754db063..0e876646d769c6cc5c7a1e03b054dd7770f74123
@@@ -350,8 -350,8 +350,8 @@@ static void gen6_pm_rps_work(struct wor
  {
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps_work);
-       u8 new_delay = dev_priv->cur_delay;
        u32 pm_iir, pm_imr;
+       u8 new_delay;
  
        spin_lock_irq(&dev_priv->rps_lock);
        pm_iir = dev_priv->pm_iir;
        I915_WRITE(GEN6_PMIMR, 0);
        spin_unlock_irq(&dev_priv->rps_lock);
  
-       if (!pm_iir)
+       if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                return;
  
        mutex_lock(&dev_priv->dev->struct_mutex);
-       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-               if (dev_priv->cur_delay != dev_priv->max_delay)
-                       new_delay = dev_priv->cur_delay + 1;
-               if (new_delay > dev_priv->max_delay)
-                       new_delay = dev_priv->max_delay;
-       } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-               gen6_gt_force_wake_get(dev_priv);
-               if (dev_priv->cur_delay != dev_priv->min_delay)
-                       new_delay = dev_priv->cur_delay - 1;
-               if (new_delay < dev_priv->min_delay) {
-                       new_delay = dev_priv->min_delay;
-                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-                                  ((new_delay << 16) & 0x3f0000));
-               } else {
-                       /* Make sure we continue to get down interrupts
-                        * until we hit the minimum frequency */
-                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-               }
-               gen6_gt_force_wake_put(dev_priv);
-       }
+       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+               new_delay = dev_priv->cur_delay + 1;
+       else
+               new_delay = dev_priv->cur_delay - 1;
  
        gen6_set_rps(dev_priv->dev, new_delay);
-       dev_priv->cur_delay = new_delay;
  
-       /*
-        * rps_lock not held here because clearing is non-destructive. There is
-        * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-        * by holding struct_mutex for the duration of the write.
-        */
        mutex_unlock(&dev_priv->dev->struct_mutex);
  }
  
 +
 +/**
 + * ivybridge_parity_work - Workqueue called when a parity error interrupt
 + * occurred.
 + * @work: workqueue struct
 + *
 + * Doesn't actually do anything except notify userspace. As a consequence of
 + * this event, userspace should try to remap the bad rows since statistically
 + * the same row is more likely to go bad again.
 + */
 +static void ivybridge_parity_work(struct work_struct *work)
 +{
 +      drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 +                                                  parity_error_work);
 +      u32 error_status, row, bank, subbank;
 +      char *parity_event[5];
 +      uint32_t misccpctl;
 +      unsigned long flags;
 +
 +      /* We must turn off DOP level clock gating to access the L3 registers.
 +       * In order to prevent a get/put style interface, acquire struct mutex
 +       * any time we access those registers.
 +       */
 +      mutex_lock(&dev_priv->dev->struct_mutex);
 +
 +      misccpctl = I915_READ(GEN7_MISCCPCTL);
 +      I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 +      POSTING_READ(GEN7_MISCCPCTL);
 +
 +      error_status = I915_READ(GEN7_L3CDERRST1);
 +      row = GEN7_PARITY_ERROR_ROW(error_status);
 +      bank = GEN7_PARITY_ERROR_BANK(error_status);
 +      subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
 +
 +      I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
 +                                  GEN7_L3CDERRST1_ENABLE);
 +      POSTING_READ(GEN7_L3CDERRST1);
 +
 +      I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 +
 +      spin_lock_irqsave(&dev_priv->irq_lock, flags);
 +      dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
 +      I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 +      spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 +
 +      mutex_unlock(&dev_priv->dev->struct_mutex);
 +
 +      parity_event[0] = "L3_PARITY_ERROR=1";
 +      parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
 +      parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
 +      parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
 +      parity_event[4] = NULL;
 +
 +      kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
 +                         KOBJ_CHANGE, parity_event);
 +
 +      DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
 +                row, bank, subbank);
 +
 +      kfree(parity_event[3]);
 +      kfree(parity_event[2]);
 +      kfree(parity_event[1]);
 +}
 +
 +static void ivybridge_handle_parity_error(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      unsigned long flags;
 +
 +      if (!IS_IVYBRIDGE(dev))
 +              return;
 +
 +      spin_lock_irqsave(&dev_priv->irq_lock, flags);
 +      dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
 +      I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 +      spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 +
 +      queue_work(dev_priv->wq, &dev_priv->parity_error_work);
 +}
 +
  static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }
 +
 +      if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
 +              ivybridge_handle_parity_error(dev);
  }
  
  static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
@@@ -1723,6 -1617,7 +1700,6 @@@ static void ironlake_irq_preinstall(str
  
        atomic_set(&dev_priv->irq_received, 0);
  
 -
        I915_WRITE(HWSTAM, 0xeffe);
  
        /* XXX hotplug from PCH */
@@@ -1885,13 -1780,13 +1862,13 @@@ static int ivybridge_irq_postinstall(st
                   DE_PIPEA_VBLANK_IVB);
        POSTING_READ(DEIER);
  
 -      dev_priv->gt_irq_mask = ~0;
 +      dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
  
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  
        render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
 -              GEN6_BLITTER_USER_INTERRUPT;
 +              GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);
  
@@@ -2240,9 -2135,9 +2217,9 @@@ static int i915_irq_postinstall(struct 
                        hotplug_en |= HDMIC_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
                        hotplug_en |= HDMID_HOTPLUG_INT_EN;
 -              if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
 +              if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
                        hotplug_en |= SDVOC_HOTPLUG_INT_EN;
 -              if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
 +              if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
                        hotplug_en |= SDVOB_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
                        hotplug_en |= CRT_HOTPLUG_INT_EN;
@@@ -2402,8 -2297,10 +2379,8 @@@ static void i965_irq_preinstall(struct 
  
        atomic_set(&dev_priv->irq_received, 0);
  
 -      if (I915_HAS_HOTPLUG(dev)) {
 -              I915_WRITE(PORT_HOTPLUG_EN, 0);
 -              I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 -      }
 +      I915_WRITE(PORT_HOTPLUG_EN, 0);
 +      I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  
        I915_WRITE(HWSTAM, 0xeffe);
        for_each_pipe(pipe)
  static int i965_irq_postinstall(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      u32 hotplug_en;
        u32 enable_mask;
        u32 error_mask;
  
        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
 +                             I915_DISPLAY_PORT_INTERRUPT |
                               I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                               I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
        dev_priv->pipestat[0] = 0;
        dev_priv->pipestat[1] = 0;
  
 -      if (I915_HAS_HOTPLUG(dev)) {
 -              /* Enable in IER... */
 -              enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
 -              /* and unmask in IMR */
 -              dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
 -      }
 -
        /*
         * Enable some error detection, note the instruction error mask
         * bit is reserved, so we leave it masked.
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);
  
 -      if (I915_HAS_HOTPLUG(dev)) {
 -              u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
 -
 -              /* Note HDMI and DP share bits */
 -              if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
 -                      hotplug_en |= HDMIB_HOTPLUG_INT_EN;
 -              if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
 -                      hotplug_en |= HDMIC_HOTPLUG_INT_EN;
 -              if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
 -                      hotplug_en |= HDMID_HOTPLUG_INT_EN;
 -              if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
 +      /* Note HDMI and DP share hotplug bits */
 +      hotplug_en = 0;
 +      if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
 +              hotplug_en |= HDMIB_HOTPLUG_INT_EN;
 +      if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
 +              hotplug_en |= HDMIC_HOTPLUG_INT_EN;
 +      if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
 +              hotplug_en |= HDMID_HOTPLUG_INT_EN;
 +      if (IS_G4X(dev)) {
 +              if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
                        hotplug_en |= SDVOC_HOTPLUG_INT_EN;
 -              if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
 +              if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
                        hotplug_en |= SDVOB_HOTPLUG_INT_EN;
 -              if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
 -                      hotplug_en |= CRT_HOTPLUG_INT_EN;
 +      } else {
 +              if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
 +                      hotplug_en |= SDVOC_HOTPLUG_INT_EN;
 +              if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
 +                      hotplug_en |= SDVOB_HOTPLUG_INT_EN;
 +      }
 +      if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
 +              hotplug_en |= CRT_HOTPLUG_INT_EN;
  
 -                      /* Programming the CRT detection parameters tends
 -                         to generate a spurious hotplug event about three
 -                         seconds later.  So just do it once.
 -                      */
 -                      if (IS_G4X(dev))
 -                              hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
 -                      hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 -              }
 +              /* Programming the CRT detection parameters tends
 +                 to generate a spurious hotplug event about three
 +                 seconds later.  So just do it once.
 +                 */
 +              if (IS_G4X(dev))
 +                      hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
 +              hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 +      }
  
 -              /* Ignore TV since it's buggy */
 +      /* Ignore TV since it's buggy */
  
 -              I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 -      }
 +      I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
  
        intel_opregion_enable_asle(dev);
  
@@@ -2548,7 -2446,8 +2525,7 @@@ static irqreturn_t i965_irq_handler(DRM
                ret = IRQ_HANDLED;
  
                /* Consume port.  Then clear IIR or we'll miss events */
 -              if ((I915_HAS_HOTPLUG(dev)) &&
 -                  (iir & I915_DISPLAY_PORT_INTERRUPT)) {
 +              if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
  
                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
@@@ -2621,8 -2520,10 +2598,8 @@@ static void i965_irq_uninstall(struct d
        if (!dev_priv)
                return;
  
 -      if (I915_HAS_HOTPLUG(dev)) {
 -              I915_WRITE(PORT_HOTPLUG_EN, 0);
 -              I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 -      }
 +      I915_WRITE(PORT_HOTPLUG_EN, 0);
 +      I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  
        I915_WRITE(HWSTAM, 0xffffffff);
        for_each_pipe(pipe)
@@@ -2643,7 -2544,6 +2620,7 @@@ void intel_irq_init(struct drm_device *
        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->error_work, i915_error_work_func);
        INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
 +      INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
  
        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
index 1d801724c1dba409fe1393174cce6dffbd1a89b6,9147894209061dd69c12ddc9bde72f904ebae3ac..9f5148acf73c650d9dcd60313ce94eeee096fb73
@@@ -910,9 -910,10 +910,10 @@@ static void assert_pll(struct drm_i915_
  
  /* For ILK+ */
  static void assert_pch_pll(struct drm_i915_private *dev_priv,
-                          struct intel_crtc *intel_crtc, bool state)
+                          struct intel_pch_pll *pll,
+                          struct intel_crtc *crtc,
+                          bool state)
  {
-       int reg;
        u32 val;
        bool cur_state;
  
                return;
        }
  
-       if (!intel_crtc->pch_pll) {
-               WARN(1, "asserting PCH PLL enabled with no PLL\n");
+       if (WARN (!pll,
+                 "asserting PCH PLL %s with no PLL\n", state_string(state)))
                return;
-       }
  
-       if (HAS_PCH_CPT(dev_priv->dev)) {
+       val = I915_READ(pll->pll_reg);
+       cur_state = !!(val & DPLL_VCO_ENABLE);
+       WARN(cur_state != state,
+            "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+            pll->pll_reg, state_string(state), state_string(cur_state), val);
+       /* Make sure the selected PLL is correctly attached to the transcoder */
+       if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
                u32 pch_dpll;
  
                pch_dpll = I915_READ(PCH_DPLL_SEL);
-               /* Make sure the selected PLL is enabled to the transcoder */
-               WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
-                    "transcoder %d PLL not enabled\n", intel_crtc->pipe);
+               cur_state = pll->pll_reg == _PCH_DPLL_B;
+               if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+                         "PLL[%d] not attached to this transcoder %d: %08x\n",
+                         cur_state, crtc->pipe, pch_dpll)) {
+                       cur_state = !!(val >> (4*crtc->pipe + 3));
+                       WARN(cur_state != state,
+                            "PLL[%d] not %s on this transcoder %d: %08x\n",
+                            pll->pll_reg == _PCH_DPLL_B,
+                            state_string(state),
+                            crtc->pipe,
+                            val);
+               }
        }
-       reg = intel_crtc->pch_pll->pll_reg;
-       val = I915_READ(reg);
-       cur_state = !!(val & DPLL_VCO_ENABLE);
-       WARN(cur_state != state,
-            "PCH PLL state assertion failure (expected %s, current %s)\n",
-            state_string(state), state_string(cur_state));
  }
- #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
- #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+ #define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+ #define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
  
  static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
@@@ -1424,7 -1432,7 +1432,7 @@@ static void intel_enable_pch_pll(struc
        assert_pch_refclk_enabled(dev_priv);
  
        if (pll->active++ && pll->on) {
-               assert_pch_pll_enabled(dev_priv, intel_crtc);
+               assert_pch_pll_enabled(dev_priv, pll, NULL);
                return;
        }
  
@@@ -1460,12 -1468,12 +1468,12 @@@ static void intel_disable_pch_pll(struc
                      intel_crtc->base.base.id);
  
        if (WARN_ON(pll->active == 0)) {
-               assert_pch_pll_disabled(dev_priv, intel_crtc);
+               assert_pch_pll_disabled(dev_priv, pll, NULL);
                return;
        }
  
        if (--pll->active) {
-               assert_pch_pll_enabled(dev_priv, intel_crtc);
+               assert_pch_pll_enabled(dev_priv, pll, NULL);
                return;
        }
  
@@@ -1495,7 -1503,9 +1503,9 @@@ static void intel_enable_transcoder(str
        BUG_ON(dev_priv->info->gen < 5);
  
        /* Make sure PCH DPLL is enabled */
-       assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
+       assert_pch_pll_enabled(dev_priv,
+                              to_intel_crtc(crtc)->pch_pll,
+                              to_intel_crtc(crtc));
  
        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, pipe);
@@@ -4395,10 -4405,25 +4405,10 @@@ static int ironlake_crtc_mode_set(struc
                                                    &clock,
                                                    &reduced_clock);
        }
 -      /* SDVO TV has fixed PLL values depend on its clock range,
 -         this mirrors vbios setting. */
 -      if (is_sdvo && is_tv) {
 -              if (adjusted_mode->clock >= 100000
 -                  && adjusted_mode->clock < 140500) {
 -                      clock.p1 = 2;
 -                      clock.p2 = 10;
 -                      clock.n = 3;
 -                      clock.m1 = 16;
 -                      clock.m2 = 8;
 -              } else if (adjusted_mode->clock >= 140500
 -                         && adjusted_mode->clock <= 200000) {
 -                      clock.p1 = 1;
 -                      clock.p2 = 10;
 -                      clock.n = 6;
 -                      clock.m1 = 12;
 -                      clock.m2 = 8;
 -              }
 -      }
 +
 +      if (is_sdvo && is_tv)
 +              i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
 +
  
        /* FDI link */
        pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
                if (is_lvds && has_reduced_clock && i915_powersave) {
                        I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
                        intel_crtc->lowfreq_avail = true;
 -                      if (HAS_PIPE_CXSR(dev)) {
 -                              DRM_DEBUG_KMS("enabling CxSR downclocking\n");
 -                              pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
 -                      }
                } else {
                        I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
 -                      if (HAS_PIPE_CXSR(dev)) {
 -                              DRM_DEBUG_KMS("disabling CxSR downclocking\n");
 -                              pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
 -                      }
                }
        }
  
@@@ -6918,7 -6951,7 +6928,7 @@@ void intel_modeset_init(struct drm_devi
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.prefer_shadow = 1;
  
-       dev->mode_config.funcs = (void *)&intel_mode_funcs;
+       dev->mode_config.funcs = &intel_mode_funcs;
  
        intel_init_quirks(dev);
  
index 9b2effcc90e5c635ba971917808811f18904543a,296cfc201a81ea9a0017abfb0e97eee7eaf8dc18..c71e7890e6f6127fdd10554ea30618b0105ab3cb
@@@ -266,6 -266,9 +266,9 @@@ intel_dp_mode_valid(struct drm_connecto
        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;
  
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+               return MODE_H_ILLEGAL;
        return MODE_OK;
  }
  
@@@ -702,6 -705,9 +705,9 @@@ intel_dp_mode_fixup(struct drm_encoder 
                mode->clock = intel_dp->panel_fixed_mode->clock;
        }
  
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+               return false;
        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %02x pixel clock %iKHz\n",
                      max_lane_count, bws[max_clock], mode->clock);
@@@ -1154,11 -1160,10 +1160,10 @@@ static void ironlake_edp_panel_off(stru
  
        DRM_DEBUG_KMS("Turn eDP power off\n");
  
-       WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
-       ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+       WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
  
        pp = ironlake_get_pp_control(dev_priv);
-       pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+       pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
  
@@@ -1266,18 -1271,16 +1271,16 @@@ static void intel_dp_prepare(struct drm
  {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
  
+       /* Make sure the panel is off before trying to change the mode. But also
+        * ensure that we have vdd while we switch off the panel. */
+       ironlake_edp_panel_vdd_on(intel_dp);
        ironlake_edp_backlight_off(intel_dp);
        ironlake_edp_panel_off(intel_dp);
  
-       /* Wake up the sink first */
-       ironlake_edp_panel_vdd_on(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_link_down(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, false);
-       /* Make sure the panel is off before trying to
-        * change the mode
-        */
  }
  
  static void intel_dp_commit(struct drm_encoder *encoder)
@@@ -1309,10 -1312,11 +1312,11 @@@ intel_dp_dpms(struct drm_encoder *encod
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
  
        if (mode != DRM_MODE_DPMS_ON) {
+               /* Switching the panel off requires vdd. */
+               ironlake_edp_panel_vdd_on(intel_dp);
                ironlake_edp_backlight_off(intel_dp);
                ironlake_edp_panel_off(intel_dp);
  
-               ironlake_edp_panel_vdd_on(intel_dp);
                intel_dp_sink_dpms(intel_dp, mode);
                intel_dp_link_down(intel_dp);
                ironlake_edp_panel_vdd_off(intel_dp, false);
@@@ -1961,6 -1965,23 +1965,23 @@@ intel_dp_get_dpcd(struct intel_dp *inte
        return false;
  }
  
+ static void
+ intel_dp_probe_oui(struct intel_dp *intel_dp)
+ {
+       u8 buf[3];
+       if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+               return;
+       if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+               DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+                             buf[0], buf[1], buf[2]);
+       if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+               DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+                             buf[0], buf[1], buf[2]);
+ }
  static bool
  intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
  {
@@@ -2066,23 -2087,25 +2087,23 @@@ g4x_dp_detect(struct intel_dp *intel_dp
  {
        struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      uint32_t temp, bit;
 +      uint32_t bit;
  
        switch (intel_dp->output_reg) {
        case DP_B:
 -              bit = DPB_HOTPLUG_INT_STATUS;
 +              bit = DPB_HOTPLUG_LIVE_STATUS;
                break;
        case DP_C:
 -              bit = DPC_HOTPLUG_INT_STATUS;
 +              bit = DPC_HOTPLUG_LIVE_STATUS;
                break;
        case DP_D:
 -              bit = DPD_HOTPLUG_INT_STATUS;
 +              bit = DPD_HOTPLUG_LIVE_STATUS;
                break;
        default:
                return connector_status_unknown;
        }
  
 -      temp = I915_READ(PORT_HOTPLUG_STAT);
 -
 -      if ((temp & bit) == 0)
 +      if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
                return connector_status_disconnected;
  
        return intel_dp_detect_dpcd(intel_dp);
@@@ -2142,6 -2165,8 +2163,8 @@@ intel_dp_detect(struct drm_connector *c
        if (status != connector_status_connected)
                return status;
  
+       intel_dp_probe_oui(intel_dp);
        if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
                intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
        } else {
@@@ -2462,19 -2487,19 +2485,19 @@@ intel_dp_init(struct drm_device *dev, i
                case DP_B:
                case PCH_DP_B:
                        dev_priv->hotplug_supported_mask |=
 -                              HDMIB_HOTPLUG_INT_STATUS;
 +                              DPB_HOTPLUG_INT_STATUS;
                        name = "DPDDC-B";
                        break;
                case DP_C:
                case PCH_DP_C:
                        dev_priv->hotplug_supported_mask |=
 -                              HDMIC_HOTPLUG_INT_STATUS;
 +                              DPC_HOTPLUG_INT_STATUS;
                        name = "DPDDC-C";
                        break;
                case DP_D:
                case PCH_DP_D:
                        dev_priv->hotplug_supported_mask |=
 -                              HDMID_HOTPLUG_INT_STATUS;
 +                              DPD_HOTPLUG_INT_STATUS;
                        name = "DPDDC-D";
                        break;
        }
index ca3c6e128594030c6c85156065b6bcb320a666f3,b6a9d45fc3c69d4b5be7e8c6f93490636b6049c2..2f5106a488c511be29f63d482ce0828ae05187bb
@@@ -140,6 -140,9 +140,6 @@@ struct intel_sdvo 
  
        /* DDC bus used by this SDVO encoder */
        uint8_t ddc_bus;
 -
 -      /* Input timings for adjusted_mode */
 -      struct intel_sdvo_dtd input_dtd;
  };
  
  struct intel_sdvo_connector {
@@@ -780,10 -783,12 +780,12 @@@ static void intel_sdvo_get_dtd_from_mod
                ((v_sync_len & 0x30) >> 4);
  
        dtd->part2.dtd_flags = 0x18;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
        if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-               dtd->part2.dtd_flags |= 0x2;
+               dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-               dtd->part2.dtd_flags |= 0x4;
+               dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
  
        dtd->part2.sdvo_flags = 0;
        dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@@ -817,9 -822,11 +819,11 @@@ static void intel_sdvo_get_mode_from_dt
        mode->clock = dtd->part1.clock * 10;
  
        mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-       if (dtd->part2.dtd_flags & 0x2)
+       if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
                mode->flags |= DRM_MODE_FLAG_PHSYNC;
-       if (dtd->part2.dtd_flags & 0x4)
+       if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
                mode->flags |= DRM_MODE_FLAG_PVSYNC;
  }
  
@@@ -946,15 -953,11 +950,15 @@@ intel_sdvo_set_output_timings_from_mode
        return true;
  }
  
 +/* Asks the sdvo controller for the preferred input mode given the output mode.
 + * Unfortunately we have to set up the full output mode to do that. */
  static bool
 -intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
 -                                      struct drm_display_mode *mode,
 -                                      struct drm_display_mode *adjusted_mode)
 +intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
 +                                  struct drm_display_mode *mode,
 +                                  struct drm_display_mode *adjusted_mode)
  {
 +      struct intel_sdvo_dtd input_dtd;
 +
        /* Reset the input timing to the screen. Assume always input 0. */
        if (!intel_sdvo_set_target_input(intel_sdvo))
                return false;
                return false;
  
        if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
 -                                                 &intel_sdvo->input_dtd))
 +                                                 &input_dtd))
                return false;
  
 -      intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
 +      intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
  
        return true;
  }
@@@ -990,17 -993,17 +994,17 @@@ static bool intel_sdvo_mode_fixup(struc
                if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
                        return false;
  
 -              (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
 -                                                           mode,
 -                                                           adjusted_mode);
 +              (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
 +                                                         mode,
 +                                                         adjusted_mode);
        } else if (intel_sdvo->is_lvds) {
                if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
                                                             intel_sdvo->sdvo_lvds_fixed_mode))
                        return false;
  
 -              (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
 -                                                           mode,
 -                                                           adjusted_mode);
 +              (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
 +                                                         mode,
 +                                                         adjusted_mode);
        }
  
        /* Make the CRTC code factor in the SDVO pixel multiplier.  The
@@@ -1054,9 -1057,7 +1058,9 @@@ static void intel_sdvo_mode_set(struct 
                                             intel_sdvo->sdvo_lvds_fixed_mode);
        else
                intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
 -      (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
 +      if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
 +              DRM_INFO("Setting output timings on %s failed\n",
 +                       SDVO_NAME(intel_sdvo));
  
        /* Set the input timing to the screen. Assume always input 0. */
        if (!intel_sdvo_set_target_input(intel_sdvo))
         * adjusted_mode.
         */
        intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
 -      (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
 +      if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
 +              DRM_INFO("Setting input timings on %s failed\n",
 +                       SDVO_NAME(intel_sdvo));
  
        switch (pixel_multiplier) {
        default:
@@@ -1246,8 -1245,14 +1250,14 @@@ static bool intel_sdvo_get_capabilities
  
  static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
  {
+       struct drm_device *dev = intel_sdvo->base.base.dev;
        u8 response[2];
  
+       /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+        * on the line. */
+       if (IS_I945G(dev) || IS_I945GM(dev))
+               return false;
        return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
                                    &response, 2) && response[0];
  }
@@@ -1371,7 -1376,7 +1381,7 @@@ intel_sdvo_detect(struct drm_connector 
  
        /* add 30ms delay when the output type might be TV */
        if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
 -              mdelay(30);
 +              msleep(30);
  
        if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
                return connector_status_unknown;
@@@ -2516,7 -2521,6 +2526,7 @@@ bool intel_sdvo_init(struct drm_device 
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
        struct intel_sdvo *intel_sdvo;
 +      u32 hotplug_mask;
        int i;
  
        intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
                }
        }
  
 -      if (intel_sdvo->is_sdvob)
 -              dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
 -      else
 -              dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
 +      hotplug_mask = 0;
 +      if (IS_G4X(dev)) {
 +              hotplug_mask = intel_sdvo->is_sdvob ?
 +                      SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
 +      } else if (IS_GEN4(dev)) {
 +              hotplug_mask = intel_sdvo->is_sdvob ?
 +                      SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
 +      } else {
 +              hotplug_mask = intel_sdvo->is_sdvob ?
 +                      SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
 +      }
 +      dev_priv->hotplug_supported_mask |= hotplug_mask;
  
        drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);