]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/commitdiff
Merge tag 'v3.1-rc10' into drm-core-next
authorDave Airlie <airlied@redhat.com>
Tue, 18 Oct 2011 09:54:30 +0000 (10:54 +0100)
committerDave Airlie <airlied@redhat.com>
Tue, 18 Oct 2011 09:54:30 +0000 (10:54 +0100)
There are a number of fixes in mainline required for code in -next,
also there was a few conflicts I'd rather resolve myself.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Conflicts:
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_asic.h

13 files changed:
1  2 
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_bo.c

index b79c6f14fb720ee7dd1ecdf228c2b051ae3772f7,f07e4252b70834c103b19bc193ce6124dde3d67f..c96b019a3b76818c510f0d1890aeb1c943ce0c8d
@@@ -67,11 -67,11 +67,11 @@@ module_param_named(i915_enable_rc6, i91
  MODULE_PARM_DESC(i915_enable_rc6,
                "Enable power-saving render C-state 6 (default: true)");
  
- unsigned int i915_enable_fbc __read_mostly = 1;
+ unsigned int i915_enable_fbc __read_mostly = -1;
  module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
  MODULE_PARM_DESC(i915_enable_fbc,
                "Enable frame buffer compression for power savings "
-               "(default: false)");
+               "(default: -1 (use per-chip default))");
  
  unsigned int i915_lvds_downclock __read_mostly = 0;
  module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
@@@ -294,7 -294,7 +294,7 @@@ MODULE_DEVICE_TABLE(pci, pciidlist)
  #define INTEL_PCH_CPT_DEVICE_ID_TYPE  0x1c00
  #define INTEL_PCH_PPT_DEVICE_ID_TYPE  0x1e00
  
 -void intel_detect_pch (struct drm_device *dev)
 +void intel_detect_pch(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pch;
@@@ -377,7 -377,7 +377,7 @@@ void gen6_gt_force_wake_put(struct drm_
  
  void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
  {
 -      if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES ) {
 +      if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
@@@ -770,12 -770,12 +770,12 @@@ static int i915_pm_poweroff(struct devi
  }
  
  static const struct dev_pm_ops i915_pm_ops = {
 -     .suspend = i915_pm_suspend,
 -     .resume = i915_pm_resume,
 -     .freeze = i915_pm_freeze,
 -     .thaw = i915_pm_thaw,
 -     .poweroff = i915_pm_poweroff,
 -     .restore = i915_pm_resume,
 +      .suspend = i915_pm_suspend,
 +      .resume = i915_pm_resume,
 +      .freeze = i915_pm_freeze,
 +      .thaw = i915_pm_thaw,
 +      .poweroff = i915_pm_poweroff,
 +      .restore = i915_pm_resume,
  };
  
  static struct vm_operations_struct i915_gem_vm_ops = {
index f0e5f9f32aa8ebdc0e962f6d3f915a772a2cf0b6,04411ad2e779c2f2b2d6c8d3a83a214c097bc0d9..8230cf54cc8d7fb8d9ef14a803cfd0104bd3030a
@@@ -31,7 -31,6 +31,7 @@@
  #include <linux/kernel.h>
  #include <linux/slab.h>
  #include <linux/vgaarb.h>
 +#include <drm/drm_edid.h>
  #include "drmP.h"
  #include "intel_drv.h"
  #include "i915_drm.h"
  
  #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
  
 -bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
 +bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
  static void intel_update_watermarks(struct drm_device *dev);
  static void intel_increase_pllclock(struct drm_crtc *crtc);
  static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
  
  typedef struct {
 -    /* given values */
 -    int n;
 -    int m1, m2;
 -    int p1, p2;
 -    /* derived values */
 -    int       dot;
 -    int       vco;
 -    int       m;
 -    int       p;
 +      /* given values */
 +      int n;
 +      int m1, m2;
 +      int p1, p2;
 +      /* derived values */
 +      int     dot;
 +      int     vco;
 +      int     m;
 +      int     p;
  } intel_clock_t;
  
  typedef struct {
 -    int       min, max;
 +      int     min, max;
  } intel_range_t;
  
  typedef struct {
 -    int       dot_limit;
 -    int       p2_slow, p2_fast;
 +      int     dot_limit;
 +      int     p2_slow, p2_fast;
  } intel_p2_t;
  
  #define INTEL_P2_NUM                2
  typedef struct intel_limit intel_limit_t;
  struct intel_limit {
 -    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
 -    intel_p2_t            p2;
 -    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
 -                    int, int, intel_clock_t *);
 +      intel_range_t   dot, vco, n, m, m1, m2, p, p1;
 +      intel_p2_t          p2;
 +      bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
 +                      int, int, intel_clock_t *);
  };
  
  /* FDI */
@@@ -106,56 -105,56 +106,56 @@@ intel_fdi_link_freq(struct drm_device *
  }
  
  static const intel_limit_t intel_limits_i8xx_dvo = {
 -        .dot = { .min = 25000, .max = 350000 },
 -        .vco = { .min = 930000, .max = 1400000 },
 -        .n = { .min = 3, .max = 16 },
 -        .m = { .min = 96, .max = 140 },
 -        .m1 = { .min = 18, .max = 26 },
 -        .m2 = { .min = 6, .max = 16 },
 -        .p = { .min = 4, .max = 128 },
 -        .p1 = { .min = 2, .max = 33 },
 +      .dot = { .min = 25000, .max = 350000 },
 +      .vco = { .min = 930000, .max = 1400000 },
 +      .n = { .min = 3, .max = 16 },
 +      .m = { .min = 96, .max = 140 },
 +      .m1 = { .min = 18, .max = 26 },
 +      .m2 = { .min = 6, .max = 16 },
 +      .p = { .min = 4, .max = 128 },
 +      .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
        .find_pll = intel_find_best_PLL,
  };
  
  static const intel_limit_t intel_limits_i8xx_lvds = {
 -        .dot = { .min = 25000, .max = 350000 },
 -        .vco = { .min = 930000, .max = 1400000 },
 -        .n = { .min = 3, .max = 16 },
 -        .m = { .min = 96, .max = 140 },
 -        .m1 = { .min = 18, .max = 26 },
 -        .m2 = { .min = 6, .max = 16 },
 -        .p = { .min = 4, .max = 128 },
 -        .p1 = { .min = 1, .max = 6 },
 +      .dot = { .min = 25000, .max = 350000 },
 +      .vco = { .min = 930000, .max = 1400000 },
 +      .n = { .min = 3, .max = 16 },
 +      .m = { .min = 96, .max = 140 },
 +      .m1 = { .min = 18, .max = 26 },
 +      .m2 = { .min = 6, .max = 16 },
 +      .p = { .min = 4, .max = 128 },
 +      .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
        .find_pll = intel_find_best_PLL,
  };
  
  static const intel_limit_t intel_limits_i9xx_sdvo = {
 -        .dot = { .min = 20000, .max = 400000 },
 -        .vco = { .min = 1400000, .max = 2800000 },
 -        .n = { .min = 1, .max = 6 },
 -        .m = { .min = 70, .max = 120 },
 -        .m1 = { .min = 10, .max = 22 },
 -        .m2 = { .min = 5, .max = 9 },
 -        .p = { .min = 5, .max = 80 },
 -        .p1 = { .min = 1, .max = 8 },
 +      .dot = { .min = 20000, .max = 400000 },
 +      .vco = { .min = 1400000, .max = 2800000 },
 +      .n = { .min = 1, .max = 6 },
 +      .m = { .min = 70, .max = 120 },
 +      .m1 = { .min = 10, .max = 22 },
 +      .m2 = { .min = 5, .max = 9 },
 +      .p = { .min = 5, .max = 80 },
 +      .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_find_best_PLL,
  };
  
  static const intel_limit_t intel_limits_i9xx_lvds = {
 -        .dot = { .min = 20000, .max = 400000 },
 -        .vco = { .min = 1400000, .max = 2800000 },
 -        .n = { .min = 1, .max = 6 },
 -        .m = { .min = 70, .max = 120 },
 -        .m1 = { .min = 10, .max = 22 },
 -        .m2 = { .min = 5, .max = 9 },
 -        .p = { .min = 7, .max = 98 },
 -        .p1 = { .min = 1, .max = 8 },
 +      .dot = { .min = 20000, .max = 400000 },
 +      .vco = { .min = 1400000, .max = 2800000 },
 +      .n = { .min = 1, .max = 6 },
 +      .m = { .min = 70, .max = 120 },
 +      .m1 = { .min = 10, .max = 22 },
 +      .m2 = { .min = 5, .max = 9 },
 +      .p = { .min = 7, .max = 98 },
 +      .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
        .find_pll = intel_find_best_PLL,
@@@ -223,44 -222,44 +223,44 @@@ static const intel_limit_t intel_limits
  };
  
  static const intel_limit_t intel_limits_g4x_display_port = {
 -        .dot = { .min = 161670, .max = 227000 },
 -        .vco = { .min = 1750000, .max = 3500000},
 -        .n = { .min = 1, .max = 2 },
 -        .m = { .min = 97, .max = 108 },
 -        .m1 = { .min = 0x10, .max = 0x12 },
 -        .m2 = { .min = 0x05, .max = 0x06 },
 -        .p = { .min = 10, .max = 20 },
 -        .p1 = { .min = 1, .max = 2},
 -        .p2 = { .dot_limit = 0,
 +      .dot = { .min = 161670, .max = 227000 },
 +      .vco = { .min = 1750000, .max = 3500000},
 +      .n = { .min = 1, .max = 2 },
 +      .m = { .min = 97, .max = 108 },
 +      .m1 = { .min = 0x10, .max = 0x12 },
 +      .m2 = { .min = 0x05, .max = 0x06 },
 +      .p = { .min = 10, .max = 20 },
 +      .p1 = { .min = 1, .max = 2},
 +      .p2 = { .dot_limit = 0,
                .p2_slow = 10, .p2_fast = 10 },
 -        .find_pll = intel_find_pll_g4x_dp,
 +      .find_pll = intel_find_pll_g4x_dp,
  };
  
  static const intel_limit_t intel_limits_pineview_sdvo = {
 -        .dot = { .min = 20000, .max = 400000},
 -        .vco = { .min = 1700000, .max = 3500000 },
 +      .dot = { .min = 20000, .max = 400000},
 +      .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
 -        .n = { .min = 3, .max = 6 },
 -        .m = { .min = 2, .max = 256 },
 +      .n = { .min = 3, .max = 6 },
 +      .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
 -        .m1 = { .min = 0, .max = 0 },
 -        .m2 = { .min = 0, .max = 254 },
 -        .p = { .min = 5, .max = 80 },
 -        .p1 = { .min = 1, .max = 8 },
 +      .m1 = { .min = 0, .max = 0 },
 +      .m2 = { .min = 0, .max = 254 },
 +      .p = { .min = 5, .max = 80 },
 +      .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_find_best_PLL,
  };
  
  static const intel_limit_t intel_limits_pineview_lvds = {
 -        .dot = { .min = 20000, .max = 400000 },
 -        .vco = { .min = 1700000, .max = 3500000 },
 -        .n = { .min = 3, .max = 6 },
 -        .m = { .min = 2, .max = 256 },
 -        .m1 = { .min = 0, .max = 0 },
 -        .m2 = { .min = 0, .max = 254 },
 -        .p = { .min = 7, .max = 112 },
 -        .p1 = { .min = 1, .max = 8 },
 +      .dot = { .min = 20000, .max = 400000 },
 +      .vco = { .min = 1700000, .max = 3500000 },
 +      .n = { .min = 3, .max = 6 },
 +      .m = { .min = 2, .max = 256 },
 +      .m1 = { .min = 0, .max = 0 },
 +      .m2 = { .min = 0, .max = 254 },
 +      .p = { .min = 7, .max = 112 },
 +      .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_find_best_PLL,
@@@ -322,7 -321,7 +322,7 @@@ static const intel_limit_t intel_limits
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
 -      .p1 = { .min = 2,.max = 8 },
 +      .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_g4x_find_best_PLL,
@@@ -336,24 -335,24 +336,24 @@@ static const intel_limit_t intel_limits
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
 -      .p1 = { .min = 2,.max = 6 },
 +      .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
        .find_pll = intel_g4x_find_best_PLL,
  };
  
  static const intel_limit_t intel_limits_ironlake_display_port = {
 -        .dot = { .min = 25000, .max = 350000 },
 -        .vco = { .min = 1760000, .max = 3510000},
 -        .n = { .min = 1, .max = 2 },
 -        .m = { .min = 81, .max = 90 },
 -        .m1 = { .min = 12, .max = 22 },
 -        .m2 = { .min = 5, .max = 9 },
 -        .p = { .min = 10, .max = 20 },
 -        .p1 = { .min = 1, .max = 2},
 -        .p2 = { .dot_limit = 0,
 +      .dot = { .min = 25000, .max = 350000 },
 +      .vco = { .min = 1760000, .max = 3510000},
 +      .n = { .min = 1, .max = 2 },
 +      .m = { .min = 81, .max = 90 },
 +      .m1 = { .min = 12, .max = 22 },
 +      .m2 = { .min = 5, .max = 9 },
 +      .p = { .min = 10, .max = 20 },
 +      .p1 = { .min = 1, .max = 2},
 +      .p2 = { .dot_limit = 0,
                .p2_slow = 10, .p2_fast = 10 },
 -        .find_pll = intel_find_pll_ironlake_dp,
 +      .find_pll = intel_find_pll_ironlake_dp,
  };
  
  static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
@@@ -405,7 -404,7 +405,7 @@@ static const intel_limit_t *intel_g4x_l
                limit = &intel_limits_g4x_hdmi;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
                limit = &intel_limits_g4x_sdvo;
 -      } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
 +      } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
                limit = &intel_limits_g4x_display_port;
        } else /* The option is for other outputs */
                limit = &intel_limits_i9xx_sdvo;
@@@ -489,26 -488,26 +489,26 @@@ static bool intel_PLL_is_valid(struct d
                               const intel_clock_t *clock)
  {
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
 -              INTELPllInvalid ("p1 out of range\n");
 +              INTELPllInvalid("p1 out of range\n");
        if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
 -              INTELPllInvalid ("p out of range\n");
 +              INTELPllInvalid("p out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
 -              INTELPllInvalid ("m2 out of range\n");
 +              INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
 -              INTELPllInvalid ("m1 out of range\n");
 +              INTELPllInvalid("m1 out of range\n");
        if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
 -              INTELPllInvalid ("m1 <= m2\n");
 +              INTELPllInvalid("m1 <= m2\n");
        if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
 -              INTELPllInvalid ("m out of range\n");
 +              INTELPllInvalid("m out of range\n");
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
 -              INTELPllInvalid ("n out of range\n");
 +              INTELPllInvalid("n out of range\n");
        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
 -              INTELPllInvalid ("vco out of range\n");
 +              INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
 -              INTELPllInvalid ("dot out of range\n");
 +              INTELPllInvalid("dot out of range\n");
  
        return true;
  }
@@@ -543,7 -542,7 +543,7 @@@ intel_find_best_PLL(const intel_limit_
                        clock.p2 = limit->p2.p2_fast;
        }
  
 -      memset (best_clock, 0, sizeof (*best_clock));
 +      memset(best_clock, 0, sizeof(*best_clock));
  
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
@@@ -1800,6 -1799,7 +1800,7 @@@ static void intel_update_fbc(struct drm
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
+       int enable_fbc;
  
        DRM_DEBUG_KMS("\n");
  
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;
  
-       if (!i915_enable_fbc) {
-               DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
+       enable_fbc = i915_enable_fbc;
+       if (enable_fbc < 0) {
+               DRM_DEBUG_KMS("fbc set to per-chip default\n");
+               enable_fbc = 1;
+               if (INTEL_INFO(dev)->gen <= 5)
+                       enable_fbc = 0;
+       }
+       if (!enable_fbc) {
+               DRM_DEBUG_KMS("fbc disabled per module param\n");
                dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
                goto out_disable;
        }
@@@ -2433,7 -2440,7 +2441,7 @@@ static void ironlake_fdi_link_train(str
  
  }
  
 -static const int snb_b_fdi_train_param [] = {
 +static const int snb_b_fdi_train_param[] = {
        FDI_LINK_TRAIN_400MV_0DB_SNB_B,
        FDI_LINK_TRAIN_400MV_6DB_SNB_B,
        FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@@ -2489,7 -2496,7 +2497,7 @@@ static void gen6_fdi_link_train(struct 
        if (HAS_PCH_CPT(dev))
                cpt_phase_pointer_enable(dev, pipe);
  
 -      for (i = 0; i < 4; i++ ) {
 +      for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        POSTING_READ(reg);
        udelay(150);
  
 -      for (i = 0; i < 4; i++ ) {
 +      for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@@ -2608,7 -2615,7 +2616,7 @@@ static void ivb_manual_fdi_link_train(s
        if (HAS_PCH_CPT(dev))
                cpt_phase_pointer_enable(dev, pipe);
  
 -      for (i = 0; i < 4; i++ ) {
 +      for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        POSTING_READ(reg);
        udelay(150);
  
 -      for (i = 0; i < 4; i++ ) {
 +      for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@@ -3286,14 -3293,14 +3294,14 @@@ static void ironlake_crtc_commit(struc
        ironlake_crtc_enable(crtc);
  }
  
 -void intel_encoder_prepare (struct drm_encoder *encoder)
 +void intel_encoder_prepare(struct drm_encoder *encoder)
  {
        struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
        /* lvds has its own version of prepare see intel_lvds_prepare */
        encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
  }
  
 -void intel_encoder_commit (struct drm_encoder *encoder)
 +void intel_encoder_commit(struct drm_encoder *encoder)
  {
        struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
        /* lvds has its own version of commit see intel_lvds_commit */
@@@ -5670,131 -5677,6 +5678,131 @@@ static int intel_crtc_mode_set(struct d
        return ret;
  }
  
 +static void g4x_write_eld(struct drm_connector *connector,
 +                        struct drm_crtc *crtc)
 +{
 +      struct drm_i915_private *dev_priv = connector->dev->dev_private;
 +      uint8_t *eld = connector->eld;
 +      uint32_t eldv;
 +      uint32_t len;
 +      uint32_t i;
 +
 +      i = I915_READ(G4X_AUD_VID_DID);
 +
 +      if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
 +              eldv = G4X_ELDV_DEVCL_DEVBLC;
 +      else
 +              eldv = G4X_ELDV_DEVCTG;
 +
 +      i = I915_READ(G4X_AUD_CNTL_ST);
 +      i &= ~(eldv | G4X_ELD_ADDR);
 +      len = (i >> 9) & 0x1f;          /* ELD buffer size */
 +      I915_WRITE(G4X_AUD_CNTL_ST, i);
 +
 +      if (!eld[0])
 +              return;
 +
 +      len = min_t(uint8_t, eld[2], len);
 +      DRM_DEBUG_DRIVER("ELD size %d\n", len);
 +      for (i = 0; i < len; i++)
 +              I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
 +
 +      i = I915_READ(G4X_AUD_CNTL_ST);
 +      i |= eldv;
 +      I915_WRITE(G4X_AUD_CNTL_ST, i);
 +}
 +
 +static void ironlake_write_eld(struct drm_connector *connector,
 +                                   struct drm_crtc *crtc)
 +{
 +      struct drm_i915_private *dev_priv = connector->dev->dev_private;
 +      uint8_t *eld = connector->eld;
 +      uint32_t eldv;
 +      uint32_t i;
 +      int len;
 +      int hdmiw_hdmiedid;
 +      int aud_cntl_st;
 +      int aud_cntrl_st2;
 +
 +      if (IS_IVYBRIDGE(connector->dev)) {
 +              hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
 +              aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
 +              aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
 +      } else {
 +              hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A;
 +              aud_cntl_st = GEN5_AUD_CNTL_ST_A;
 +              aud_cntrl_st2 = GEN5_AUD_CNTL_ST2;
 +      }
 +
 +      i = to_intel_crtc(crtc)->pipe;
 +      hdmiw_hdmiedid += i * 0x100;
 +      aud_cntl_st += i * 0x100;
 +
 +      DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
 +
 +      i = I915_READ(aud_cntl_st);
 +      i = (i >> 29) & 0x3;            /* DIP_Port_Select, 0x1 = PortB */
 +      if (!i) {
 +              DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
 +              /* operate blindly on all ports */
 +              eldv = GEN5_ELD_VALIDB;
 +              eldv |= GEN5_ELD_VALIDB << 4;
 +              eldv |= GEN5_ELD_VALIDB << 8;
 +      } else {
 +              DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
 +              eldv = GEN5_ELD_VALIDB << ((i - 1) * 4);
 +      }
 +
 +      i = I915_READ(aud_cntrl_st2);
 +      i &= ~eldv;
 +      I915_WRITE(aud_cntrl_st2, i);
 +
 +      if (!eld[0])
 +              return;
 +
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
 +              DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
 +              eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
 +      }
 +
 +      i = I915_READ(aud_cntl_st);
 +      i &= ~GEN5_ELD_ADDRESS;
 +      I915_WRITE(aud_cntl_st, i);
 +
 +      len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
 +      DRM_DEBUG_DRIVER("ELD size %d\n", len);
 +      for (i = 0; i < len; i++)
 +              I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
 +
 +      i = I915_READ(aud_cntrl_st2);
 +      i |= eldv;
 +      I915_WRITE(aud_cntrl_st2, i);
 +}
 +
 +void intel_write_eld(struct drm_encoder *encoder,
 +                   struct drm_display_mode *mode)
 +{
 +      struct drm_crtc *crtc = encoder->crtc;
 +      struct drm_connector *connector;
 +      struct drm_device *dev = encoder->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      connector = drm_select_eld(encoder, mode);
 +      if (!connector)
 +              return;
 +
 +      DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 +                       connector->base.id,
 +                       drm_get_connector_name(connector),
 +                       connector->encoder->base.id,
 +                       drm_get_encoder_name(connector->encoder));
 +
 +      connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
 +
 +      if (dev_priv->display.write_eld)
 +              dev_priv->display.write_eld(connector, crtc);
 +}
 +
  /** Loads the palette/gamma unit for the CRTC with the prepared values */
  void intel_crtc_load_lut(struct drm_crtc *crtc)
  {
@@@ -8272,7 -8154,7 +8280,7 @@@ static void intel_init_display(struct d
        }
  
        /* Returns the core display clock speed */
 -      if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
 +      if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
                dev_priv->display.get_display_clock_speed =
                        i945_get_display_clock_speed;
        else if (IS_I915G(dev))
                        }
                        dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
                        dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
 +                      dev_priv->display.write_eld = ironlake_write_eld;
                } else if (IS_GEN6(dev)) {
                        if (SNB_READ_WM0_LATENCY()) {
                                dev_priv->display.update_wm = sandybridge_update_wm;
                        }
                        dev_priv->display.fdi_link_train = gen6_fdi_link_train;
                        dev_priv->display.init_clock_gating = gen6_init_clock_gating;
 +                      dev_priv->display.write_eld = ironlake_write_eld;
                } else if (IS_IVYBRIDGE(dev)) {
                        /* FIXME: detect B0+ stepping and use auto training */
                        dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
                                dev_priv->display.update_wm = NULL;
                        }
                        dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
 -
 +                      dev_priv->display.write_eld = ironlake_write_eld;
                } else
                        dev_priv->display.update_wm = NULL;
        } else if (IS_PINEVIEW(dev)) {
                        DRM_INFO("failed to find known CxSR latency "
                                 "(found ddr%s fsb freq %d, mem freq %d), "
                                 "disabling CxSR\n",
 -                               (dev_priv->is_ddr3 == 1) ? "3": "2",
 +                               (dev_priv->is_ddr3 == 1) ? "3" : "2",
                                 dev_priv->fsb_freq, dev_priv->mem_freq);
                        /* Disable CxSR and never update its watermark again */
                        pineview_disable_cxsr(dev);
                        dev_priv->display.update_wm = pineview_update_wm;
                dev_priv->display.init_clock_gating = gen3_init_clock_gating;
        } else if (IS_G4X(dev)) {
 +              dev_priv->display.write_eld = g4x_write_eld;
                dev_priv->display.update_wm = g4x_update_wm;
                dev_priv->display.init_clock_gating = g4x_init_clock_gating;
        } else if (IS_GEN4(dev)) {
   * resume, or other times.  This quirk makes sure that's the case for
   * affected systems.
   */
 -static void quirk_pipea_force (struct drm_device *dev)
 +static void quirk_pipea_force(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
@@@ -8443,7 -8322,7 +8451,7 @@@ struct intel_quirk intel_quirks[] = 
        /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
        { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
        /* HP Mini needs pipe A force quirk (LP: #322104) */
 -      { 0x27ae,0x103c, 0x361a, quirk_pipea_force },
 +      { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
  
        /* Thinkpad R31 needs pipe A force quirk */
        { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
@@@ -8694,7 -8573,7 +8702,7 @@@ struct intel_display_error_state 
  struct intel_display_error_state *
  intel_display_capture_error_state(struct drm_device *dev)
  {
 -        drm_i915_private_t *dev_priv = dev->dev_private;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_display_error_state *error;
        int i;
  
                error->plane[i].control = I915_READ(DSPCNTR(i));
                error->plane[i].stride = I915_READ(DSPSTRIDE(i));
                error->plane[i].size = I915_READ(DSPSIZE(i));
 -              error->plane[i].pos= I915_READ(DSPPOS(i));
 +              error->plane[i].pos = I915_READ(DSPPOS(i));
                error->plane[i].addr = I915_READ(DSPADDR(i));
                if (INTEL_INFO(dev)->gen >= 4) {
                        error->plane[i].surface = I915_READ(DSPSURF(i));
index b7e718639b13f122fd22a6d661b78c523ba3e462,fe1099d8817e30127f6ef13274e7ff2be9ce32c7..98044d626a8ddbaebcf27d0aaa13ac4a313807c9
@@@ -34,7 -34,7 +34,7 @@@
  #define _wait_for(COND, MS, W) ({ \
        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
        int ret__ = 0;                                                  \
 -      while (! (COND)) {                                              \
 +      while (!(COND)) {                                               \
                if (time_after(jiffies, timeout__)) {                   \
                        ret__ = -ETIMEDOUT;                             \
                        break;                                          \
  
  #define MSLEEP(x) do { \
        if (in_dbg_master()) \
 -              mdelay(x); \
 +              mdelay(x); \
        else \
                msleep(x); \
 -} while(0)
 +} while (0)
  
  #define KHz(x) (1000*x)
  #define MHz(x) KHz(1000*x)
@@@ -284,7 -284,7 +284,7 @@@ voi
  intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                 struct drm_display_mode *adjusted_mode);
  extern bool intel_dpd_is_edp(struct drm_device *dev);
 -extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
 +extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
  extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
  
  /* intel_panel.c */
@@@ -304,8 -304,8 +304,8 @@@ extern void intel_panel_destroy_backlig
  extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
  
  extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 -extern void intel_encoder_prepare (struct drm_encoder *encoder);
 -extern void intel_encoder_commit (struct drm_encoder *encoder);
 +extern void intel_encoder_prepare(struct drm_encoder *encoder);
 +extern void intel_encoder_commit(struct drm_encoder *encoder);
  extern void intel_encoder_destroy(struct drm_encoder *encoder);
  
  static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
@@@ -337,9 -337,6 +337,6 @@@ extern void intel_release_load_detect_p
                                           struct drm_connector *connector,
                                           struct intel_load_detect_pipe *old);
  
- extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
- extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
- extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
  extern void intelfb_restore(void);
  extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                                    u16 blue, int regno);
@@@ -380,6 -377,4 +377,6 @@@ extern void intel_fb_output_poll_change
  extern void intel_fb_restore_mode(struct drm_device *dev);
  
  extern void intel_init_clock_gating(struct drm_device *dev);
 +extern void intel_write_eld(struct drm_encoder *encoder,
 +                          struct drm_display_mode *mode);
  #endif /* __INTEL_DRV_H__ */
index aa94110f0be42fe999ff540272baa843440c9fd9,6348c499616f08449d164626b6f818c43a87780d..73120024321929136de0e4107de156f12e98b54e
@@@ -43,7 -43,7 +43,7 @@@
  #define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
  
  #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
 -                         SDVO_TV_MASK)
 +                      SDVO_TV_MASK)
  
  #define IS_TV(c)      (c->output_flag & SDVO_TV_MASK)
  #define IS_TMDS(c)    (c->output_flag & SDVO_TMDS_MASK)
@@@ -92,6 -92,11 +92,11 @@@ struct intel_sdvo 
        */
        uint16_t attached_output;
  
+       /*
+        * Hotplug activation bits for this device
+        */
+       uint8_t hotplug_active[2];
        /**
         * This is used to select the color range of RBG outputs in HDMI mode.
         * It is only valid when using TMDS encoding and 8 bit per color mode.
@@@ -283,117 -288,117 +288,117 @@@ static const struct _sdvo_cmd_name 
        u8 cmd;
        const char *name;
  } sdvo_cmd_names[] = {
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
 -
 -    /* Add the op code for SDVO enhancements */
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
 -
 -    /* HDMI op code */
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
 -    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
 +
 +      /* Add the op code for SDVO enhancements */
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
 +
 +      /* HDMI op code */
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
 +      SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
  };
  
  #define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
@@@ -1208,74 -1213,20 +1213,20 @@@ static bool intel_sdvo_get_capabilities
        return true;
  }
  
- /* No use! */
- #if 0
- struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
- {
-       struct drm_connector *connector = NULL;
-       struct intel_sdvo *iout = NULL;
-       struct intel_sdvo *sdvo;
-       /* find the sdvo connector */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               iout = to_intel_sdvo(connector);
-               if (iout->type != INTEL_OUTPUT_SDVO)
-                       continue;
-               sdvo = iout->dev_priv;
-               if (sdvo->sdvo_reg == SDVOB && sdvoB)
-                       return connector;
-               if (sdvo->sdvo_reg == SDVOC && !sdvoB)
-                       return connector;
-       }
-       return NULL;
- }
- int intel_sdvo_supports_hotplug(struct drm_connector *connector)
+ static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
  {
        u8 response[2];
-       u8 status;
-       struct intel_sdvo *intel_sdvo;
-       DRM_DEBUG_KMS("\n");
-       if (!connector)
-               return 0;
-       intel_sdvo = to_intel_sdvo(connector);
  
        return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
                                    &response, 2) && response[0];
  }
  
void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
  {
-       u8 response[2];
-       u8 status;
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
-       intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-       intel_sdvo_read_response(intel_sdvo, &response, 2);
-       if (on) {
-               intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
-               status = intel_sdvo_read_response(intel_sdvo, &response, 2);
-               intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
-       } else {
-               response[0] = 0;
-               response[1] = 0;
-               intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
-       }
+       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
  
-       intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-       intel_sdvo_read_response(intel_sdvo, &response, 2);
+       intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
  }
- #endif
  
  static bool
  intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
@@@ -2045,6 -1996,7 +1996,7 @@@ intel_sdvo_dvi_init(struct intel_sdvo *
  {
        struct drm_encoder *encoder = &intel_sdvo->base.base;
        struct drm_connector *connector;
+       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
  
  
        intel_connector = &intel_sdvo_connector->base;
        connector = &intel_connector->base;
-       connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+       if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+               connector->polled = DRM_CONNECTOR_POLL_HPD;
+               intel_sdvo->hotplug_active[0] |= 1 << device;
+               /* Some SDVO devices have one-shot hotplug interrupts.
+                * Ensure that they get re-enabled when an interrupt happens.
+                */
+               intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+               intel_sdvo_enable_hotplug(intel_encoder);
+       }
+       else
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
        encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
        connector->connector_type = DRM_MODE_CONNECTOR_DVID;
  
@@@ -2313,7 -2275,7 +2275,7 @@@ static bool intel_sdvo_tv_create_proper
                DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
                              data_value[0], data_value[1], response); \
        } \
 -} while(0)
 +} while (0)
  
  static bool
  intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
@@@ -2480,7 -2442,7 +2442,7 @@@ static bool intel_sdvo_create_enhance_p
  
        if (IS_TV(intel_sdvo_connector))
                return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
 -      else if(IS_LVDS(intel_sdvo_connector))
 +      else if (IS_LVDS(intel_sdvo_connector))
                return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
        else
                return true;
@@@ -2569,6 -2531,14 +2531,14 @@@ bool intel_sdvo_init(struct drm_device 
        if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
                goto err;
  
+       /* Set up hotplug command - note paranoia about contents of reply.
+        * We assume that the hardware is in a sane state, and only touch
+        * the bits we think we understand.
+        */
+       intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
+                            &intel_sdvo->hotplug_active, 2);
+       intel_sdvo->hotplug_active[0] &= ~0x3;
        if (intel_sdvo_output_setup(intel_sdvo,
                                    intel_sdvo->caps.output_flags) != true) {
                DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
index 35b5673d432d2518166ef862b18119ef04e90a66,c4ffa14fb2f45ed09733cca219c54873f48d5a82..ed406e8404a3498870d517fd286b5824a8665e22
@@@ -39,7 -39,7 +39,7 @@@
  
  static void evergreen_gpu_init(struct radeon_device *rdev);
  void evergreen_fini(struct radeon_device *rdev);
 -static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 +void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
  
  void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
  {
@@@ -935,9 -935,6 +935,9 @@@ int evergreen_pcie_gart_enable(struct r
        WREG32(VM_CONTEXT1_CNTL, 0);
  
        evergreen_pcie_gart_tlb_flush(rdev);
 +      DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 +               (unsigned)(rdev->mc.gtt_size >> 20),
 +               (unsigned long long)rdev->gart.table_addr);
        rdev->gart.ready = true;
        return 0;
  }
@@@ -1407,7 -1404,8 +1407,8 @@@ int evergreen_cp_resume(struct radeon_d
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       WREG32(CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB_WPTR, rdev->cp.wptr);
  
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
  
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB_WPTR);
  
        evergreen_cp_start(rdev);
        rdev->cp.ready = true;
@@@ -1593,48 -1590,6 +1593,6 @@@ static u32 evergreen_get_tile_pipe_to_b
        return backend_map;
  }
  
- static void evergreen_program_channel_remap(struct radeon_device *rdev)
- {
-       u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-       tmp = RREG32(MC_SHARED_CHMAP);
-       switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-       case 0:
-       case 1:
-       case 2:
-       case 3:
-       default:
-               /* default mapping */
-               mc_shared_chremap = 0x00fac688;
-               break;
-       }
-       switch (rdev->family) {
-       case CHIP_HEMLOCK:
-       case CHIP_CYPRESS:
-       case CHIP_BARTS:
-               tcp_chan_steer_lo = 0x54763210;
-               tcp_chan_steer_hi = 0x0000ba98;
-               break;
-       case CHIP_JUNIPER:
-       case CHIP_REDWOOD:
-       case CHIP_CEDAR:
-       case CHIP_PALM:
-       case CHIP_SUMO:
-       case CHIP_SUMO2:
-       case CHIP_TURKS:
-       case CHIP_CAICOS:
-       default:
-               tcp_chan_steer_lo = 0x76543210;
-               tcp_chan_steer_hi = 0x0000ba98;
-               break;
-       }
-       WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
-       WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
-       WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
- }
  static void evergreen_gpu_init(struct radeon_device *rdev)
  {
        u32 cc_rb_backend_disable = 0;
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
  
-       evergreen_program_channel_remap(rdev);
        num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
        grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
  
@@@ -2633,7 -2586,7 +2589,7 @@@ int evergreen_irq_set(struct radeon_dev
        return 0;
  }
  
 -static inline void evergreen_irq_ack(struct radeon_device *rdev)
 +static void evergreen_irq_ack(struct radeon_device *rdev)
  {
        u32 tmp;
  
@@@ -2744,7 -2697,7 +2700,7 @@@ void evergreen_irq_suspend(struct radeo
        r600_rlc_stop(rdev);
  }
  
 -static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
 +static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
  {
        u32 wptr, tmp;
  
@@@ -3050,7 -3003,8 +3006,7 @@@ static int evergreen_startup(struct rad
        int r;
  
        /* enable pcie gen2 link */
 -      if (!ASIC_IS_DCE5(rdev))
 -              evergreen_pcie_gen2_enable(rdev);
 +      evergreen_pcie_gen2_enable(rdev);
  
        if (ASIC_IS_DCE5(rdev)) {
                if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
  
        r = evergreen_blit_init(rdev);
        if (r) {
 -              evergreen_blit_fini(rdev);
 +              r600_blit_fini(rdev);
                rdev->asic->copy = NULL;
                dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
        }
@@@ -3153,14 -3107,45 +3109,14 @@@ int evergreen_resume(struct radeon_devi
  
  int evergreen_suspend(struct radeon_device *rdev)
  {
 -      int r;
 -
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
        rdev->cp.ready = false;
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
 +      r600_blit_suspend(rdev);
  
 -      /* unpin shaders bo */
 -      r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
 -      if (likely(r == 0)) {
 -              radeon_bo_unpin(rdev->r600_blit.shader_obj);
 -              radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 -      }
 -
 -      return 0;
 -}
 -
 -int evergreen_copy_blit(struct radeon_device *rdev,
 -                      uint64_t src_offset,
 -                      uint64_t dst_offset,
 -                      unsigned num_gpu_pages,
 -                      struct radeon_fence *fence)
 -{
 -      int r;
 -
 -      mutex_lock(&rdev->r600_blit.mutex);
 -      rdev->r600_blit.vb_ib = NULL;
 -      r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 -      if (r) {
 -              if (rdev->r600_blit.vb_ib)
 -                      radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 -              mutex_unlock(&rdev->r600_blit.mutex);
 -              return r;
 -      }
 -      evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 -      evergreen_blit_done_copy(rdev, fence);
 -      mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
  }
  
@@@ -3272,7 -3257,7 +3228,7 @@@ int evergreen_init(struct radeon_devic
  
  void evergreen_fini(struct radeon_device *rdev)
  {
 -      evergreen_blit_fini(rdev);
 +      r600_blit_fini(rdev);
        r700_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        rdev->bios = NULL;
  }
  
 -static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 +void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
  {
        u32 link_width_cntl, speed_cntl;
  
index 770cc2ab088a80c88423a75ff762aa76c1507d19,8c79ca97753db9508aaefac92c4453567f7a1157..556b7bc3418b9da6d0aa5bb61bdf31e83ce89059
@@@ -40,7 -40,6 +40,7 @@@ extern void evergreen_mc_program(struc
  extern void evergreen_irq_suspend(struct radeon_device *rdev);
  extern int evergreen_mc_init(struct radeon_device *rdev);
  extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
 +extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
  
  #define EVERGREEN_PFP_UCODE_SIZE 1120
  #define EVERGREEN_PM4_UCODE_SIZE 1376
@@@ -570,36 -569,6 +570,6 @@@ static u32 cayman_get_tile_pipe_to_back
        return backend_map;
  }
  
- static void cayman_program_channel_remap(struct radeon_device *rdev)
- {
-       u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-       tmp = RREG32(MC_SHARED_CHMAP);
-       switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-       case 0:
-       case 1:
-       case 2:
-       case 3:
-       default:
-               /* default mapping */
-               mc_shared_chremap = 0x00fac688;
-               break;
-       }
-       switch (rdev->family) {
-       case CHIP_CAYMAN:
-       default:
-               //tcp_chan_steer_lo = 0x54763210
-               tcp_chan_steer_lo = 0x76543210;
-               tcp_chan_steer_hi = 0x0000ba98;
-               break;
-       }
-       WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
-       WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
-       WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
- }
  static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
                                            u32 disable_mask_per_se,
                                            u32 max_disable_mask_per_se,
@@@ -843,8 -812,6 +813,6 @@@ static void cayman_gpu_init(struct rade
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
  
-       cayman_program_channel_remap(rdev);
        /* primary versions */
        WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
        WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
@@@ -1000,9 -967,6 +968,9 @@@ int cayman_pcie_gart_enable(struct rade
        WREG32(VM_CONTEXT1_CNTL, 0);
  
        cayman_pcie_gart_tlb_flush(rdev);
 +      DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 +               (unsigned)(rdev->mc.gtt_size >> 20),
 +               (unsigned long long)rdev->gart.table_addr);
        rdev->gart.ready = true;
        return 0;
  }
@@@ -1191,7 -1155,8 +1159,8 @@@ int cayman_cp_resume(struct radeon_devi
  
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB0_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB0_WPTR, rdev->cp.wptr);
  
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
        WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
  
        rdev->cp.rptr = RREG32(CP_RB0_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB0_WPTR);
  
        /* ring1  - compute only */
        /* Set ring buffer size */
  
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB1_WPTR, 0);
+       rdev->cp1.wptr = 0;
+       WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
  
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
        WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
  
        rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
-       rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
  
        /* ring2 - compute only */
        /* Set ring buffer size */
  
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB2_WPTR, 0);
+       rdev->cp2.wptr = 0;
+       WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
  
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
        WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
  
        rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
-       rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
  
        /* start the rings */
        cayman_cp_start(rdev);
@@@ -1377,9 -1341,6 +1345,9 @@@ static int cayman_startup(struct radeon
  {
        int r;
  
 +      /* enable pcie gen2 link */
 +      evergreen_pcie_gen2_enable(rdev);
 +
        if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
                r = ni_init_microcode(rdev);
                if (r) {
  
        r = evergreen_blit_init(rdev);
        if (r) {
 -              evergreen_blit_fini(rdev);
 +              r600_blit_fini(rdev);
                rdev->asic->copy = NULL;
                dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
        }
@@@ -1462,13 -1423,21 +1430,13 @@@ int cayman_resume(struct radeon_device 
  
  int cayman_suspend(struct radeon_device *rdev)
  {
 -      int r;
 -
        /* FIXME: we should wait for ring to be empty */
        cayman_cp_enable(rdev, false);
        rdev->cp.ready = false;
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        cayman_pcie_gart_disable(rdev);
 -
 -      /* unpin shaders bo */
 -      r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
 -      if (likely(r == 0)) {
 -              radeon_bo_unpin(rdev->r600_blit.shader_obj);
 -              radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 -      }
 +      r600_blit_suspend(rdev);
  
        return 0;
  }
@@@ -1581,7 -1550,7 +1549,7 @@@ int cayman_init(struct radeon_device *r
  
  void cayman_fini(struct radeon_device *rdev)
  {
 -      evergreen_blit_fini(rdev);
 +      r600_blit_fini(rdev);
        cayman_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
index e108f265882a79e8588fbbfa7545b905f9f24c1e,7fcdbbbf297965044e550e611d8eb8b32a086051..8f8b8fa143570c116960befda9c048837cf974e3
@@@ -68,108 -68,6 +68,108 @@@ MODULE_FIRMWARE(FIRMWARE_R520)
   * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
   */
  
 +int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
 +                          struct radeon_cs_packet *pkt,
 +                          unsigned idx,
 +                          unsigned reg)
 +{
 +      int r;
 +      u32 tile_flags = 0;
 +      u32 tmp;
 +      struct radeon_cs_reloc *reloc;
 +      u32 value;
 +
 +      r = r100_cs_packet_next_reloc(p, &reloc);
 +      if (r) {
 +              DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 +                        idx, reg);
 +              r100_cs_dump_packet(p, pkt);
 +              return r;
 +      }
 +      value = radeon_get_ib_value(p, idx);
 +      tmp = value & 0x003fffff;
 +      tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
 +
 +      if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
 +              tile_flags |= RADEON_DST_TILE_MACRO;
 +      if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
 +              if (reg == RADEON_SRC_PITCH_OFFSET) {
 +                      DRM_ERROR("Cannot src blit from microtiled surface\n");
 +                      r100_cs_dump_packet(p, pkt);
 +                      return -EINVAL;
 +              }
 +              tile_flags |= RADEON_DST_TILE_MICRO;
 +      }
 +
 +      tmp |= tile_flags;
 +      p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
 +      return 0;
 +}
 +
 +int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 +                           struct radeon_cs_packet *pkt,
 +                           int idx)
 +{
 +      unsigned c, i;
 +      struct radeon_cs_reloc *reloc;
 +      struct r100_cs_track *track;
 +      int r = 0;
 +      volatile uint32_t *ib;
 +      u32 idx_value;
 +
 +      ib = p->ib->ptr;
 +      track = (struct r100_cs_track *)p->track;
 +      c = radeon_get_ib_value(p, idx++) & 0x1F;
 +      if (c > 16) {
 +          DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
 +                    pkt->opcode);
 +          r100_cs_dump_packet(p, pkt);
 +          return -EINVAL;
 +      }
 +      track->num_arrays = c;
 +      for (i = 0; i < (c - 1); i+=2, idx+=3) {
 +              r = r100_cs_packet_next_reloc(p, &reloc);
 +              if (r) {
 +                      DRM_ERROR("No reloc for packet3 %d\n",
 +                                pkt->opcode);
 +                      r100_cs_dump_packet(p, pkt);
 +                      return r;
 +              }
 +              idx_value = radeon_get_ib_value(p, idx);
 +              ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
 +
 +              track->arrays[i + 0].esize = idx_value >> 8;
 +              track->arrays[i + 0].robj = reloc->robj;
 +              track->arrays[i + 0].esize &= 0x7F;
 +              r = r100_cs_packet_next_reloc(p, &reloc);
 +              if (r) {
 +                      DRM_ERROR("No reloc for packet3 %d\n",
 +                                pkt->opcode);
 +                      r100_cs_dump_packet(p, pkt);
 +                      return r;
 +              }
 +              ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
 +              track->arrays[i + 1].robj = reloc->robj;
 +              track->arrays[i + 1].esize = idx_value >> 24;
 +              track->arrays[i + 1].esize &= 0x7F;
 +      }
 +      if (c & 1) {
 +              r = r100_cs_packet_next_reloc(p, &reloc);
 +              if (r) {
 +                      DRM_ERROR("No reloc for packet3 %d\n",
 +                                        pkt->opcode);
 +                      r100_cs_dump_packet(p, pkt);
 +                      return r;
 +              }
 +              idx_value = radeon_get_ib_value(p, idx);
 +              ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
 +              track->arrays[i + 0].robj = reloc->robj;
 +              track->arrays[i + 0].esize = idx_value >> 8;
 +              track->arrays[i + 0].esize &= 0x7F;
 +      }
 +      return r;
 +}
 +
  void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
  {
        /* enable the pflip int */
@@@ -615,9 -513,6 +615,9 @@@ int r100_pci_gart_enable(struct radeon_
        tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
        WREG32(RADEON_AIC_CNTL, tmp);
        r100_pci_gart_tlb_flush(rdev);
 +      DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 +               (unsigned)(rdev->mc.gtt_size >> 20),
 +               (unsigned long long)rdev->gart.table_addr);
        rdev->gart.ready = true;
        return 0;
  }
@@@ -693,7 -588,7 +693,7 @@@ void r100_irq_disable(struct radeon_dev
        WREG32(R_000044_GEN_INT_STATUS, tmp);
  }
  
 -static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
 +static uint32_t r100_irq_ack(struct radeon_device *rdev)
  {
        uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
        uint32_t irq_mask = RADEON_SW_INT_TEST |
@@@ -826,11 -721,11 +826,11 @@@ void r100_fence_ring_emit(struct radeon
  int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
-                  unsigned num_pages,
+                  unsigned num_gpu_pages,
                   struct radeon_fence *fence)
  {
        uint32_t cur_pages;
-       uint32_t stride_bytes = PAGE_SIZE;
+       uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
        uint32_t pitch;
        uint32_t stride_pixels;
        unsigned ndw;
        /* radeon pitch is /64 */
        pitch = stride_bytes / 64;
        stride_pixels = stride_bytes / 4;
-       num_loops = DIV_ROUND_UP(num_pages, 8191);
+       num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
  
        /* Ask for enough room for blit + flush + fence */
        ndw = 64 + (10 * num_loops);
                DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
                return -EINVAL;
        }
-       while (num_pages > 0) {
-               cur_pages = num_pages;
+       while (num_gpu_pages > 0) {
+               cur_pages = num_gpu_pages;
                if (cur_pages > 8191) {
                        cur_pages = 8191;
                }
-               num_pages -= cur_pages;
+               num_gpu_pages -= cur_pages;
  
                /* pages are in Y direction - height
                   page width in X direction - width */
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
                radeon_ring_write(rdev, 0);
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-               radeon_ring_write(rdev, num_pages);
-               radeon_ring_write(rdev, num_pages);
+               radeon_ring_write(rdev, num_gpu_pages);
+               radeon_ring_write(rdev, num_gpu_pages);
                radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
        }
        radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@@ -1095,7 -990,8 +1095,8 @@@ int r100_cp_init(struct radeon_device *
        /* Force read & write ptr to 0 */
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
-       WREG32(RADEON_CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
  
        /* set the wb address whether it's enabled or not */
        WREG32(R_00070C_CP_RB_RPTR_ADDR,
        WREG32(RADEON_CP_RB_CNTL, tmp);
        udelay(10);
        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
-       /* protect against crazy HW on resume */
-       rdev->cp.wptr &= rdev->cp.ptr_mask;
        /* Set cp mode to bus mastering & enable cp*/
        WREG32(RADEON_CP_CSQ_MODE,
               REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@@ -3254,7 -3147,7 +3252,7 @@@ void r100_bandwidth_update(struct radeo
        }
  }
  
 -static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
 +static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
  {
        DRM_ERROR("pitch                      %d\n", t->pitch);
        DRM_ERROR("use_pitch                  %d\n", t->use_pitch);
@@@ -4072,43 -3965,3 +4070,43 @@@ int r100_init(struct radeon_device *rde
        }
        return 0;
  }
 +
 +uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
 +{
 +      if (reg < rdev->rmmio_size)
 +              return readl(((void __iomem *)rdev->rmmio) + reg);
 +      else {
 +              writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
 +              return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
 +      }
 +}
 +
 +void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 +{
 +      if (reg < rdev->rmmio_size)
 +              writel(v, ((void __iomem *)rdev->rmmio) + reg);
 +      else {
 +              writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
 +              writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
 +      }
 +}
 +
 +u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
 +{
 +      if (reg < rdev->rio_mem_size)
 +              return ioread32(rdev->rio_mem + reg);
 +      else {
 +              iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
 +              return ioread32(rdev->rio_mem + RADEON_MM_DATA);
 +      }
 +}
 +
 +void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 +{
 +      if (reg < rdev->rio_mem_size)
 +              iowrite32(v, rdev->rio_mem + reg);
 +      else {
 +              iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
 +              iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
 +      }
 +}
index e87f5662a1046eb312cf23298171f8569a99a5da,720dd99163f855feeb16b921f64f21fa38cc2971..12470b090ddf871a62e478d2bca0c340792175d3
@@@ -993,9 -993,6 +993,9 @@@ int r600_pcie_gart_enable(struct radeon
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
  
        r600_pcie_gart_tlb_flush(rdev);
 +      DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 +               (unsigned)(rdev->mc.gtt_size >> 20),
 +               (unsigned long long)rdev->gart.table_addr);
        rdev->gart.ready = true;
        return 0;
  }
@@@ -2212,7 -2209,8 +2212,8 @@@ int r600_cp_resume(struct radeon_devic
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       WREG32(CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB_WPTR, rdev->cp.wptr);
  
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
  
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB_WPTR);
  
        r600_cp_start(rdev);
        rdev->cp.ready = true;
@@@ -2356,40 -2353,28 +2356,42 @@@ void r600_fence_ring_emit(struct radeon
  }
  
  int r600_copy_blit(struct radeon_device *rdev,
-                  uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_pages, struct radeon_fence *fence)
+                  uint64_t src_offset,
+                  uint64_t dst_offset,
+                  unsigned num_gpu_pages,
+                  struct radeon_fence *fence)
  {
        int r;
  
        mutex_lock(&rdev->r600_blit.mutex);
        rdev->r600_blit.vb_ib = NULL;
-       r = r600_blit_prepare_copy(rdev, num_pages);
 -      r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
++      r = r600_blit_prepare_copy(rdev, num_gpu_pages);
        if (r) {
                if (rdev->r600_blit.vb_ib)
                        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
                mutex_unlock(&rdev->r600_blit.mutex);
                return r;
        }
-       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages);
 -      r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
++      r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
        r600_blit_done_copy(rdev, fence);
        mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
  }
  
 +void r600_blit_suspend(struct radeon_device *rdev)
 +{
 +      int r;
 +
 +      /* unpin shaders bo */
 +      if (rdev->r600_blit.shader_obj) {
 +              r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
 +              if (!r) {
 +                      radeon_bo_unpin(rdev->r600_blit.shader_obj);
 +                      radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 +              }
 +      }
 +}
 +
  int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size)
@@@ -2509,6 -2494,8 +2511,6 @@@ int r600_resume(struct radeon_device *r
  
  int r600_suspend(struct radeon_device *rdev)
  {
 -      int r;
 -
        r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        r600_cp_stop(rdev);
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
 -      /* unpin shaders bo */
 -      if (rdev->r600_blit.shader_obj) {
 -              r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
 -              if (!r) {
 -                      radeon_bo_unpin(rdev->r600_blit.shader_obj);
 -                      radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 -              }
 -      }
 +      r600_blit_suspend(rdev);
 +
        return 0;
  }
  
@@@ -3144,7 -3137,7 +3146,7 @@@ int r600_irq_set(struct radeon_device *
        return 0;
  }
  
 -static inline void r600_irq_ack(struct radeon_device *rdev)
 +static void r600_irq_ack(struct radeon_device *rdev)
  {
        u32 tmp;
  
@@@ -3245,7 -3238,7 +3247,7 @@@ void r600_irq_disable(struct radeon_dev
        r600_disable_interrupt_state(rdev);
  }
  
 -static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
 +static u32 r600_get_ih_wptr(struct radeon_device *rdev)
  {
        u32 wptr, tmp;
  
index 3cf983c5243ff8f6160226a3067b528836d85f33,c1e056b35b292b76bdae1416cf5c95280dacb301..156b8b7e028e73b3f2b8d6f9761e91df242e15b5
@@@ -102,7 -102,7 +102,7 @@@ extern int radeon_pcie_gen2
  #define RADEON_FENCE_JIFFIES_TIMEOUT  (HZ / 2)
  /* RADEON_IB_POOL_SIZE must be a power of 2 */
  #define RADEON_IB_POOL_SIZE           16
 -#define RADEON_DEBUGFS_MAX_NUM_FILES  32
 +#define RADEON_DEBUGFS_MAX_COMPONENTS 32
  #define RADEONFB_CONN_LIMIT           4
  #define RADEON_BIOS_NUM_SCRATCH               8
  
@@@ -322,6 -322,7 +322,7 @@@ union radeon_gart_table 
  
  #define RADEON_GPU_PAGE_SIZE 4096
  #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+ #define RADEON_GPU_PAGE_SHIFT 12
  
  struct radeon_gart {
        dma_addr_t                      table_addr;
@@@ -522,30 -523,9 +523,30 @@@ struct r600_ih 
        bool                    enabled;
  };
  
 +struct r600_blit_cp_primitives {
 +      void (*set_render_target)(struct radeon_device *rdev, int format,
 +                                int w, int h, u64 gpu_addr);
 +      void (*cp_set_surface_sync)(struct radeon_device *rdev,
 +                                  u32 sync_type, u32 size,
 +                                  u64 mc_addr);
 +      void (*set_shaders)(struct radeon_device *rdev);
 +      void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
 +      void (*set_tex_resource)(struct radeon_device *rdev,
 +                               int format, int w, int h, int pitch,
 +                               u64 gpu_addr);
 +      void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
 +                           int x2, int y2);
 +      void (*draw_auto)(struct radeon_device *rdev);
 +      void (*set_default_state)(struct radeon_device *rdev);
 +};
 +
  struct r600_blit {
        struct mutex            mutex;
        struct radeon_bo        *shader_obj;
 +      struct r600_blit_cp_primitives primitives;
 +      int max_dim;
 +      int ring_size_common;
 +      int ring_size_per_loop;
        u64 shader_gpu_addr;
        u32 vs_offset, ps_offset;
        u32 state_offset;
        struct radeon_ib *vb_ib;
  };
  
 +void r600_blit_suspend(struct radeon_device *rdev);
 +
  int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
  void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
  int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
@@@ -623,7 -601,32 +624,7 @@@ struct radeon_cs_parser 
  
  extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
  extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
 -
 -
 -static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 -{
 -      struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
 -      u32 pg_idx, pg_offset;
 -      u32 idx_value = 0;
 -      int new_page;
 -
 -      pg_idx = (idx * 4) / PAGE_SIZE;
 -      pg_offset = (idx * 4) % PAGE_SIZE;
 -
 -      if (ibc->kpage_idx[0] == pg_idx)
 -              return ibc->kpage[0][pg_offset/4];
 -      if (ibc->kpage_idx[1] == pg_idx)
 -              return ibc->kpage[1][pg_offset/4];
 -
 -      new_page = radeon_cs_update_pages(p, pg_idx);
 -      if (new_page < 0) {
 -              p->parser_error = new_page;
 -              return 0;
 -      }
 -
 -      idx_value = ibc->kpage[new_page][pg_offset/4];
 -      return idx_value;
 -}
 +extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
  
  struct radeon_cs_packet {
        unsigned        idx;
@@@ -866,7 -869,7 +867,7 @@@ struct radeon_pm 
  /*
   * Benchmarking
   */
 -void radeon_benchmark(struct radeon_device *rdev);
 +void radeon_benchmark(struct radeon_device *rdev, int test_number);
  
  
  /*
@@@ -912,17 -915,17 +913,17 @@@ struct radeon_asic 
        int (*copy_blit)(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
-                        unsigned num_pages,
+                        unsigned num_gpu_pages,
                         struct radeon_fence *fence);
        int (*copy_dma)(struct radeon_device *rdev,
                        uint64_t src_offset,
                        uint64_t dst_offset,
-                       unsigned num_pages,
+                       unsigned num_gpu_pages,
                        struct radeon_fence *fence);
        int (*copy)(struct radeon_device *rdev,
                    uint64_t src_offset,
                    uint64_t dst_offset,
-                   unsigned num_pages,
+                   unsigned num_gpu_pages,
                    struct radeon_fence *fence);
        uint32_t (*get_engine_clock)(struct radeon_device *rdev);
        void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
@@@ -1141,8 -1144,6 +1142,8 @@@ int radeon_gem_set_tiling_ioctl(struct 
                                struct drm_file *filp);
  int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp);
 +int radeon_gem_wait_ioctl(struct drm_device *dev, void *data,
 +                        struct drm_file *filp);
  
  /* VRAM scratch page for HDP bug */
  struct r700_vram_scratch {
@@@ -1251,10 -1252,45 +1252,10 @@@ int radeon_device_init(struct radeon_de
  void radeon_device_fini(struct radeon_device *rdev);
  int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
  
 -static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
 -{
 -      if (reg < rdev->rmmio_size)
 -              return readl((rdev->rmmio) + reg);
 -      else {
 -              writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
 -              return readl((rdev->rmmio) + RADEON_MM_DATA);
 -      }
 -}
 -
 -static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 -{
 -      if (reg < rdev->rmmio_size)
 -              writel(v, (rdev->rmmio) + reg);
 -      else {
 -              writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
 -              writel(v, (rdev->rmmio) + RADEON_MM_DATA);
 -      }
 -}
 -
 -static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
 -{
 -      if (reg < rdev->rio_mem_size)
 -              return ioread32(rdev->rio_mem + reg);
 -      else {
 -              iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
 -              return ioread32(rdev->rio_mem + RADEON_MM_DATA);
 -      }
 -}
 -
 -static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 -{
 -      if (reg < rdev->rio_mem_size)
 -              iowrite32(v, rdev->rio_mem + reg);
 -      else {
 -              iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
 -              iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
 -      }
 -}
 +uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
 +void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 +u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
 +void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
  
  /*
   * Cast helper
@@@ -1377,19 -1413,19 +1378,19 @@@ void radeon_atombios_fini(struct radeon
  /*
   * RING helpers.
   */
 +
 +#if DRM_DEBUG_CODE == 0
  static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
  {
 -#if DRM_DEBUG_CODE
 -      if (rdev->cp.count_dw <= 0) {
 -              DRM_ERROR("radeon: writting more dword to ring than expected !\n");
 -      }
 -#endif
        rdev->cp.ring[rdev->cp.wptr++] = v;
        rdev->cp.wptr &= rdev->cp.ptr_mask;
        rdev->cp.count_dw--;
        rdev->cp.ring_free_dw--;
  }
 -
 +#else
 +/* With debugging this is just too big to inline */
 +void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
 +#endif
  
  /*
   * ASICs macro.
index e040de3e8cc7435d7b9f03cb41921754c4c00286,3dedaa07aac197b4179ce27aa25054b460684f7b..85f14f0337e402f8d978cf7219e9861ef276aa5c
@@@ -75,7 -75,7 +75,7 @@@ uint32_t r100_pll_rreg(struct radeon_de
  int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
-                  unsigned num_pages,
+                  unsigned num_gpu_pages,
                   struct radeon_fence *fence);
  int r100_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
@@@ -143,7 -143,7 +143,7 @@@ extern void r100_post_page_flip(struct 
  extern int r200_copy_dma(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
-                        unsigned num_pages,
+                        unsigned num_gpu_pages,
                         struct radeon_fence *fence);
  void r200_set_safe_registers(struct radeon_device *rdev);
  
@@@ -311,7 -311,7 +311,7 @@@ void r600_ring_ib_execute(struct radeon
  int r600_ring_test(struct radeon_device *rdev);
  int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_pages, struct radeon_fence *fence);
+                  unsigned num_gpu_pages, struct radeon_fence *fence);
  void r600_hpd_init(struct radeon_device *rdev);
  void r600_hpd_fini(struct radeon_device *rdev);
  bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@@ -364,11 -364,11 +364,11 @@@ void r600_hdmi_init(struct drm_encoder 
  int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
  void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
  /* r600 blit */
 -int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
 +int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages);
  void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
  void r600_kms_blit_copy(struct radeon_device *rdev,
                        u64 src_gpu_addr, u64 dst_gpu_addr,
 -                      int size_bytes);
 +                      unsigned num_gpu_pages);
  
  /*
   * rv770,rv730,rv710,rv740
@@@ -401,6 -401,9 +401,6 @@@ bool evergreen_gpu_is_lockup(struct rad
  int evergreen_asic_reset(struct radeon_device *rdev);
  void evergreen_bandwidth_update(struct radeon_device *rdev);
  void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 -int evergreen_copy_blit(struct radeon_device *rdev,
 -                      uint64_t src_offset, uint64_t dst_offset,
 -                      unsigned num_gpu_pages, struct radeon_fence *fence);
  void evergreen_hpd_init(struct radeon_device *rdev);
  void evergreen_hpd_fini(struct radeon_device *rdev);
  bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@@ -418,6 -421,13 +418,6 @@@ extern u32 evergreen_page_flip(struct r
  extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
  void evergreen_disable_interrupt_state(struct radeon_device *rdev);
  int evergreen_blit_init(struct radeon_device *rdev);
 -void evergreen_blit_fini(struct radeon_device *rdev);
 -/* evergreen blit */
 -int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
 -void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
 -void evergreen_kms_blit_copy(struct radeon_device *rdev,
 -                           u64 src_gpu_addr, u64 dst_gpu_addr,
 -                           int size_bytes);
  
  /*
   * cayman
index 9b5b3e4d23864bdf3430a0c119461099a9110e99,449c3d8c68367034cfba3bc012c75bcd9a8888e2..dec6cbe6a0a6a4ef5a1b59be5b63e037521870a4
@@@ -68,11 -68,11 +68,11 @@@ void radeon_connector_hotplug(struct dr
        if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
                int saved_dpms = connector->dpms;
  
-               if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
-                   radeon_dp_needs_link_train(radeon_connector))
-                       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-               else
+               /* Only turn off the display if it's physically disconnected */
+               if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
                        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+               else if (radeon_dp_needs_link_train(radeon_connector))
+                       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
                connector->dpms = saved_dpms;
        }
  }
@@@ -724,7 -724,6 +724,7 @@@ radeon_vga_detect(struct drm_connector 
                dret = radeon_ddc_probe(radeon_connector,
                                        radeon_connector->requires_extended_probe);
        if (dret) {
 +              radeon_connector->detected_by_load = false;
                if (radeon_connector->edid) {
                        kfree(radeon_connector->edid);
                        radeon_connector->edid = NULL;
        } else {
  
                /* if we aren't forcing don't do destructive polling */
 -              if (!force)
 -                      return connector->status;
 +              if (!force) {
 +                      /* only return the previous status if we last
 +                       * detected a monitor via load.
 +                       */
 +                      if (radeon_connector->detected_by_load)
 +                              return connector->status;
 +                      else
 +                              return ret;
 +              }
  
                if (radeon_connector->dac_load_detect && encoder) {
                        encoder_funcs = encoder->helper_private;
                        ret = encoder_funcs->detect(encoder, connector);
 +                      if (ret == connector_status_connected)
 +                              radeon_connector->detected_by_load = true;
                }
        }
  
@@@ -907,7 -897,6 +907,7 @@@ radeon_dvi_detect(struct drm_connector 
                dret = radeon_ddc_probe(radeon_connector,
                                        radeon_connector->requires_extended_probe);
        if (dret) {
 +              radeon_connector->detected_by_load = false;
                if (radeon_connector->edid) {
                        kfree(radeon_connector->edid);
                        radeon_connector->edid = NULL;
        if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
                goto out;
  
 +      /* DVI-D and HDMI-A are digital only */
 +      if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
 +          (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
 +              goto out;
 +
 +      /* if we aren't forcing don't do destructive polling */
        if (!force) {
 -              ret = connector->status;
 +              /* only return the previous status if we last
 +               * detected a monitor via load.
 +               */
 +              if (radeon_connector->detected_by_load)
 +                      ret = connector->status;
                goto out;
        }
  
                                        ret = encoder_funcs->detect(encoder, connector);
                                        if (ret == connector_status_connected) {
                                                radeon_connector->use_digital = false;
 +                                              radeon_connector->detected_by_load = true;
                                        }
                                }
                                break;
@@@ -1325,23 -1303,14 +1325,14 @@@ radeon_dp_detect(struct drm_connector *
                /* get the DPCD from the bridge */
                radeon_dp_getdpcd(radeon_connector);
  
-               if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
-                       ret = connector_status_connected;
-               else {
-                       /* need to setup ddc on the bridge */
-                       if (encoder)
-                               radeon_atom_ext_encoder_setup_ddc(encoder);
+               if (encoder) {
+                       /* setup ddc on the bridge */
+                       radeon_atom_ext_encoder_setup_ddc(encoder);
                        if (radeon_ddc_probe(radeon_connector,
-                                            radeon_connector->requires_extended_probe))
+                                            radeon_connector->requires_extended_probe)) /* try DDC */
                                ret = connector_status_connected;
-               }
-               if ((ret == connector_status_disconnected) &&
-                   radeon_connector->dac_load_detect) {
-                       struct drm_encoder *encoder = radeon_best_single_encoder(connector);
-                       struct drm_encoder_helper_funcs *encoder_funcs;
-                       if (encoder) {
-                               encoder_funcs = encoder->helper_private;
+                       else if (radeon_connector->dac_load_detect) { /* try load detection */
+                               struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
                                ret = encoder_funcs->detect(encoder, connector);
                        }
                }
index 298feaec6d56a6112c6996d25d8b54cc6aeac5dd,b13c2eedc3218ff558340da52f4bf2955cce1ada..87cc1feee3ac9e29d10a52cd33d4062342b5cc95
@@@ -161,9 -161,6 +161,9 @@@ int rv770_pcie_gart_enable(struct radeo
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
  
        r600_pcie_gart_tlb_flush(rdev);
 +      DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 +               (unsigned)(rdev->mc.gtt_size >> 20),
 +               (unsigned long long)rdev->gart.table_addr);
        rdev->gart.ready = true;
        return 0;
  }
@@@ -539,55 -536,6 +539,6 @@@ static u32 r700_get_tile_pipe_to_backen
        return backend_map;
  }
  
- static void rv770_program_channel_remap(struct radeon_device *rdev)
- {
-       u32 tcp_chan_steer, mc_shared_chremap, tmp;
-       bool force_no_swizzle;
-       switch (rdev->family) {
-       case CHIP_RV770:
-       case CHIP_RV730:
-               force_no_swizzle = false;
-               break;
-       case CHIP_RV710:
-       case CHIP_RV740:
-       default:
-               force_no_swizzle = true;
-               break;
-       }
-       tmp = RREG32(MC_SHARED_CHMAP);
-       switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-       case 0:
-       case 1:
-       default:
-               /* default mapping */
-               mc_shared_chremap = 0x00fac688;
-               break;
-       case 2:
-       case 3:
-               if (force_no_swizzle)
-                       mc_shared_chremap = 0x00fac688;
-               else
-                       mc_shared_chremap = 0x00bbc298;
-               break;
-       }
-       if (rdev->family == CHIP_RV740)
-               tcp_chan_steer = 0x00ef2a60;
-       else
-               tcp_chan_steer = 0x00fac688;
-       /* RV770 CE has special chremap setup */
-       if (rdev->pdev->device == 0x944e) {
-               tcp_chan_steer = 0x00b08b08;
-               mc_shared_chremap = 0x00b08b08;
-       }
-       WREG32(TCP_CHAN_STEER, tcp_chan_steer);
-       WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
- }
  static void rv770_gpu_init(struct radeon_device *rdev)
  {
        int i, j, num_qd_pipes;
        WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
        WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
  
-       rv770_program_channel_remap(rdev);
        WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
        WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
        WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
@@@ -1238,6 -1184,8 +1187,6 @@@ int rv770_resume(struct radeon_device *
  
  int rv770_suspend(struct radeon_device *rdev)
  {
 -      int r;
 -
        r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        rv770_pcie_gart_disable(rdev);
 -      /* unpin shaders bo */
 -      if (rdev->r600_blit.shader_obj) {
 -              r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
 -              if (likely(r == 0)) {
 -                      radeon_bo_unpin(rdev->r600_blit.shader_obj);
 -                      radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 -              }
 -      }
 +      r600_blit_suspend(rdev);
 +
        return 0;
  }
  
index 6e96c85b70da6089f3d9914da09c006ccd41ed7c,ef06194c5aa6049c4813db6048f22922b1b5eaed..50fc8e4c9a310c78b57ca4e749bc46eb1ea0a4f0
@@@ -394,7 -394,8 +394,8 @@@ static int ttm_bo_handle_move_mem(struc
  
        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (bo->ttm == NULL) {
-                       ret = ttm_bo_add_ttm(bo, false);
+                       bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+                       ret = ttm_bo_add_ttm(bo, zero);
                        if (ret)
                                goto out_err;
                }
@@@ -498,7 -499,7 +499,7 @@@ static void ttm_bo_cleanup_refs_or_queu
        int ret;
  
        spin_lock(&bdev->fence_lock);
 -      (void) ttm_bo_wait(bo, false, false, true);
 +      (void) ttm_bo_wait(bo, false, false, true, TTM_USAGE_READWRITE);
        if (!bo->sync_obj) {
  
                spin_lock(&glob->lru_lock);
@@@ -566,8 -567,7 +567,8 @@@ static int ttm_bo_cleanup_refs(struct t
  
  retry:
        spin_lock(&bdev->fence_lock);
 -      ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 +      ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
 +                        TTM_USAGE_READWRITE);
        spin_unlock(&bdev->fence_lock);
  
        if (unlikely(ret != 0))
@@@ -726,8 -726,7 +727,8 @@@ static int ttm_bo_evict(struct ttm_buff
        int ret = 0;
  
        spin_lock(&bdev->fence_lock);
 -      ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 +      ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
 +                        TTM_USAGE_READWRITE);
        spin_unlock(&bdev->fence_lock);
  
        if (unlikely(ret != 0)) {
@@@ -1074,8 -1073,7 +1075,8 @@@ int ttm_bo_move_buffer(struct ttm_buffe
         * instead of doing it here.
         */
        spin_lock(&bdev->fence_lock);
 -      ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 +      ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
 +                        TTM_USAGE_READWRITE);
        spin_unlock(&bdev->fence_lock);
        if (ret)
                return ret;
@@@ -1295,7 -1293,6 +1296,7 @@@ int ttm_bo_create(struct ttm_bo_device 
  
        return ret;
  }
 +EXPORT_SYMBOL(ttm_bo_create);
  
  static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                        unsigned mem_type, bool allow_errors)
@@@ -1696,83 -1693,34 +1697,83 @@@ out_unlock
        return ret;
  }
  
 +static void ttm_bo_unref_sync_obj_locked(struct ttm_buffer_object *bo,
 +                                       void *sync_obj,
 +                                       void **extra_sync_obj)
 +{
 +      struct ttm_bo_device *bdev = bo->bdev;
 +      struct ttm_bo_driver *driver = bdev->driver;
 +      void *tmp_obj = NULL, *tmp_obj_read = NULL, *tmp_obj_write = NULL;
 +
 +      /* We must unref the sync obj wherever it's ref'd.
 +       * Note that if we unref bo->sync_obj, we can unref both the read
 +       * and write sync objs too, because they can't be newer than
 +       * bo->sync_obj, so they are no longer relevant. */
 +      if (sync_obj == bo->sync_obj ||
 +          sync_obj == bo->sync_obj_read) {
 +              tmp_obj_read = bo->sync_obj_read;
 +              bo->sync_obj_read = NULL;
 +      }
 +      if (sync_obj == bo->sync_obj ||
 +          sync_obj == bo->sync_obj_write) {
 +              tmp_obj_write = bo->sync_obj_write;
 +              bo->sync_obj_write = NULL;
 +      }
 +      if (sync_obj == bo->sync_obj) {
 +              tmp_obj = bo->sync_obj;
 +              bo->sync_obj = NULL;
 +      }
 +
 +      clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
 +      spin_unlock(&bdev->fence_lock);
 +      if (tmp_obj)
 +              driver->sync_obj_unref(&tmp_obj);
 +      if (tmp_obj_read)
 +              driver->sync_obj_unref(&tmp_obj_read);
 +      if (tmp_obj_write)
 +              driver->sync_obj_unref(&tmp_obj_write);
 +      if (extra_sync_obj)
 +              driver->sync_obj_unref(extra_sync_obj);
 +      spin_lock(&bdev->fence_lock);
 +}
 +
  int ttm_bo_wait(struct ttm_buffer_object *bo,
 -              bool lazy, bool interruptible, bool no_wait)
 +              bool lazy, bool interruptible, bool no_wait,
 +              enum ttm_buffer_usage usage)
  {
        struct ttm_bo_driver *driver = bo->bdev->driver;
        struct ttm_bo_device *bdev = bo->bdev;
        void *sync_obj;
        void *sync_obj_arg;
        int ret = 0;
 +      void **bo_sync_obj;
  
 -      if (likely(bo->sync_obj == NULL))
 +      switch (usage) {
 +      case TTM_USAGE_READ:
 +              bo_sync_obj = &bo->sync_obj_read;
 +              break;
 +      case TTM_USAGE_WRITE:
 +              bo_sync_obj = &bo->sync_obj_write;
 +              break;
 +      case TTM_USAGE_READWRITE:
 +      default:
 +              bo_sync_obj = &bo->sync_obj;
 +      }
 +
 +      if (likely(*bo_sync_obj == NULL))
                return 0;
  
 -      while (bo->sync_obj) {
 +      while (*bo_sync_obj) {
  
 -              if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
 -                      void *tmp_obj = bo->sync_obj;
 -                      bo->sync_obj = NULL;
 -                      clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
 -                      spin_unlock(&bdev->fence_lock);
 -                      driver->sync_obj_unref(&tmp_obj);
 -                      spin_lock(&bdev->fence_lock);
 +              if (driver->sync_obj_signaled(*bo_sync_obj, bo->sync_obj_arg)) {
 +                      ttm_bo_unref_sync_obj_locked(bo, *bo_sync_obj, NULL);
                        continue;
                }
  
                if (no_wait)
                        return -EBUSY;
  
 -              sync_obj = driver->sync_obj_ref(bo->sync_obj);
 +              sync_obj = driver->sync_obj_ref(*bo_sync_obj);
                sync_obj_arg = bo->sync_obj_arg;
                spin_unlock(&bdev->fence_lock);
                ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
                        return ret;
                }
                spin_lock(&bdev->fence_lock);
 -              if (likely(bo->sync_obj == sync_obj &&
 +              if (likely(*bo_sync_obj == sync_obj &&
                           bo->sync_obj_arg == sync_obj_arg)) {
 -                      void *tmp_obj = bo->sync_obj;
 -                      bo->sync_obj = NULL;
 -                      clear_bit(TTM_BO_PRIV_FLAG_MOVING,
 -                                &bo->priv_flags);
 -                      spin_unlock(&bdev->fence_lock);
 -                      driver->sync_obj_unref(&sync_obj);
 -                      driver->sync_obj_unref(&tmp_obj);
 -                      spin_lock(&bdev->fence_lock);
 +                      ttm_bo_unref_sync_obj_locked(bo, *bo_sync_obj, &sync_obj);
                } else {
                        spin_unlock(&bdev->fence_lock);
                        driver->sync_obj_unref(&sync_obj);
@@@ -1809,7 -1764,7 +1810,7 @@@ int ttm_bo_synccpu_write_grab(struct tt
        if (unlikely(ret != 0))
                return ret;
        spin_lock(&bdev->fence_lock);
 -      ret = ttm_bo_wait(bo, false, true, no_wait);
 +      ret = ttm_bo_wait(bo, false, true, no_wait, TTM_USAGE_READWRITE);
        spin_unlock(&bdev->fence_lock);
        if (likely(ret == 0))
                atomic_inc(&bo->cpu_writers);
@@@ -1883,7 -1838,7 +1884,7 @@@ static int ttm_bo_swapout(struct ttm_me
         */
  
        spin_lock(&bo->bdev->fence_lock);
 -      ret = ttm_bo_wait(bo, false, false, false);
 +      ret = ttm_bo_wait(bo, false, false, false, TTM_USAGE_READWRITE);
        spin_unlock(&bo->bdev->fence_lock);
  
        if (unlikely(ret != 0))