git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge tag 'drm-intel-next-2014-06-20' of git://anongit.freedesktop.org/drm-intel...
author Dave Airlie <airlied@redhat.com>
Wed, 9 Jul 2014 00:38:42 +0000 (10:38 +1000)
committer Dave Airlie <airlied@redhat.com>
Wed, 9 Jul 2014 00:38:42 +0000 (10:38 +1000)
- Accurate frontbuffer tracking and frontbuffer rendering invalidate, flush and
  flip events. This is prep work for proper PSR support and should also be
  useful for DRRS & FBC.
- Runtime suspend hardware on system suspend to support the new S0ix sleep
  states, from Jesse.
- PSR updates for broadwell (Rodrigo)
- Universal plane support for cursors (Matt Roper), including core drm patches.
- Prefault gtt mappings (Chris)
- baytrail write-enable pte bit support (Akash Goel)
- mmio-based flips (Sourab Gupta) instead of blitter ring flips
- interrupt handling race fixes (Oscar Mateo)

And old, not yet merged features from the previous round:
- rps/turbo support for chv (Deepak)
- some other straggling chv patches (Ville)
- proper universal plane conversion for the primary plane (Matt Roper)
- ppgtt on vlv from Jesse
- pile of cleanups, little fixes for insane corner cases and improved debug
  support all over

* tag 'drm-intel-next-2014-06-20' of git://anongit.freedesktop.org/drm-intel: (99 commits)
  drm/i915: Update DRIVER_DATE to 20140620
  drivers/i915: Fix unnoticed failure of init_ring_common()
  drm/i915: Track frontbuffer invalidation/flushing
  drm/i915: Use new frontbuffer bits to increase pll clock
  drm/i915: don't take runtime PM reference around freeze/thaw
  drm/i915: use runtime irq suspend/resume in freeze/thaw
  drm/i915: Properly track domain of the fbcon fb
  drm/i915: Print obj->frontbuffer_bits in debugfs output
  drm/i915: Introduce accurate frontbuffer tracking
  drm/i915: Drop schedule_back from psr_exit
  drm/i915: Ditch intel_edp_psr_update
  drm/i915: Drop unecessary complexity from psr_inactivate
  drm/i915: Remove ctx->last_ring
  drm/i915/chv: Ack interrupts before handling them (CHV)
  drm/i915/bdw: Ack interrupts before handling them (GEN8)
  drm/i915/vlv: Ack interrupts before handling them (VLV)
  drm/i915: Ack interrupts before handling them (GEN5 - GEN7)
  drm/i915: Don't BUG_ON in i915_gem_obj_offset
  drm/i915: Grab dev->struct_mutex in i915_gem_pageflip_info
  drm/i915: Add some L3 registers to the parser whitelist
  ...

Conflicts:
drivers/gpu/drm/i915/i915_drv.c

35 files changed:
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_renderstate.h
drivers/gpu/drm/i915/intel_renderstate_gen6.c
drivers/gpu/drm/i915/intel_renderstate_gen7.c
drivers/gpu/drm/i915/intel_renderstate_gen8.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uncore.c
include/drm/drm_crtc.h

drivers/gpu/drm/drm_crtc.c
index bd742267663843594fc8d25ee4d84269696acfae..c808a092d82413f1287ce2247c9d6383b361748e 100644
 
 #include "drm_crtc_internal.h"
 
+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+                                                       struct drm_mode_fb_cmd2 *r,
+                                                       struct drm_file *file_priv);
+
 /**
  * drm_modeset_lock_all - take all modeset locks
  * @dev: drm device
@@ -723,7 +727,7 @@ DEFINE_WW_CLASS(crtc_ww_class);
  */
 int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
                              struct drm_plane *primary,
-                             void *cursor,
+                             struct drm_plane *cursor,
                              const struct drm_crtc_funcs *funcs)
 {
        struct drm_mode_config *config = &dev->mode_config;
@@ -748,8 +752,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
        config->num_crtc++;
 
        crtc->primary = primary;
+       crtc->cursor = cursor;
        if (primary)
                primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+       if (cursor)
+               cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
 
  out:
        drm_modeset_unlock_all(dev);
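
The hunk above wires an optional cursor plane into the CRTC and restricts it via a possible_crtcs bitmask with one bit per CRTC index. A tiny standalone sketch of that bitmask convention (names here are illustrative, not the DRM API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* One bit per CRTC, by index -- mirrors "1 << drm_crtc_index(crtc)" above. */
static uint32_t crtc_mask(unsigned int crtc_index)
{
        return 1u << crtc_index;
}

int main(void)
{
        uint32_t possible_crtcs = crtc_mask(2);  /* plane usable on CRTC index 2 only */

        assert(possible_crtcs == 0x4);
        /* The setplane path checks the same mask before accepting a CRTC. */
        printf("usable on crtc 0? %s\n", (possible_crtcs & crtc_mask(0)) ? "yes" : "no");
        printf("usable on crtc 2? %s\n", (possible_crtcs & crtc_mask(2)) ? "yes" : "no");
        return 0;
}
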
@@ -2177,45 +2184,32 @@ out:
        return ret;
 }
 
-/**
- * drm_mode_setplane - configure a plane's configuration
- * @dev: DRM device
- * @data: ioctl data*
- * @file_priv: DRM file info
+/*
+ * setplane_internal - setplane handler for internal callers
  *
- * Set plane configuration, including placement, fb, scaling, and other factors.
- * Or pass a NULL fb to disable.
+ * Note that we assume an extra reference has already been taken on fb.  If the
+ * update fails, this reference will be dropped before return; if it succeeds,
+ * the previous framebuffer (if any) will be unreferenced instead.
  *
- * Returns:
- * Zero on success, errno on failure.
+ * src_{x,y,w,h} are provided in 16.16 fixed point format
  */
-int drm_mode_setplane(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv)
+static int setplane_internal(struct drm_plane *plane,
+                            struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb,
+                            int32_t crtc_x, int32_t crtc_y,
+                            uint32_t crtc_w, uint32_t crtc_h,
+                            /* src_{x,y,w,h} values are 16.16 fixed point */
+                            uint32_t src_x, uint32_t src_y,
+                            uint32_t src_w, uint32_t src_h)
 {
-       struct drm_mode_set_plane *plane_req = data;
-       struct drm_plane *plane;
-       struct drm_crtc *crtc;
-       struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+       struct drm_device *dev = plane->dev;
+       struct drm_framebuffer *old_fb = NULL;
        int ret = 0;
        unsigned int fb_width, fb_height;
        int i;
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       /*
-        * First, find the plane, crtc, and fb objects.  If not available,
-        * we don't bother to call the driver.
-        */
-       plane = drm_plane_find(dev, plane_req->plane_id);
-       if (!plane) {
-               DRM_DEBUG_KMS("Unknown plane ID %d\n",
-                             plane_req->plane_id);
-               return -ENOENT;
-       }
-
        /* No fb means shut it down */
-       if (!plane_req->fb_id) {
+       if (!fb) {
                drm_modeset_lock_all(dev);
                old_fb = plane->fb;
                ret = plane->funcs->disable_plane(plane);
@@ -2229,14 +2223,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
                goto out;
        }
 
-       crtc = drm_crtc_find(dev, plane_req->crtc_id);
-       if (!crtc) {
-               DRM_DEBUG_KMS("Unknown crtc ID %d\n",
-                             plane_req->crtc_id);
-               ret = -ENOENT;
-               goto out;
-       }
-
        /* Check whether this plane is usable on this CRTC */
        if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
                DRM_DEBUG_KMS("Invalid crtc for plane\n");
@@ -2244,14 +2230,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
                goto out;
        }
 
-       fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
-       if (!fb) {
-               DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
-                             plane_req->fb_id);
-               ret = -ENOENT;
-               goto out;
-       }
-
        /* Check whether this plane supports the fb pixel format. */
        for (i = 0; i < plane->format_count; i++)
                if (fb->pixel_format == plane->format_types[i])
@@ -2267,43 +2245,25 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
        fb_height = fb->height << 16;
 
        /* Make sure source coordinates are inside the fb. */
-       if (plane_req->src_w > fb_width ||
-           plane_req->src_x > fb_width - plane_req->src_w ||
-           plane_req->src_h > fb_height ||
-           plane_req->src_y > fb_height - plane_req->src_h) {
+       if (src_w > fb_width ||
+           src_x > fb_width - src_w ||
+           src_h > fb_height ||
+           src_y > fb_height - src_h) {
                DRM_DEBUG_KMS("Invalid source coordinates "
                              "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
-                             plane_req->src_w >> 16,
-                             ((plane_req->src_w & 0xffff) * 15625) >> 10,
-                             plane_req->src_h >> 16,
-                             ((plane_req->src_h & 0xffff) * 15625) >> 10,
-                             plane_req->src_x >> 16,
-                             ((plane_req->src_x & 0xffff) * 15625) >> 10,
-                             plane_req->src_y >> 16,
-                             ((plane_req->src_y & 0xffff) * 15625) >> 10);
+                             src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+                             src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+                             src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+                             src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
                ret = -ENOSPC;
                goto out;
        }
 
-       /* Give drivers some help against integer overflows */
-       if (plane_req->crtc_w > INT_MAX ||
-           plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
-           plane_req->crtc_h > INT_MAX ||
-           plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
-               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
-                             plane_req->crtc_w, plane_req->crtc_h,
-                             plane_req->crtc_x, plane_req->crtc_y);
-               ret = -ERANGE;
-               goto out;
-       }
-
        drm_modeset_lock_all(dev);
        old_fb = plane->fb;
        ret = plane->funcs->update_plane(plane, crtc, fb,
-                                        plane_req->crtc_x, plane_req->crtc_y,
-                                        plane_req->crtc_w, plane_req->crtc_h,
-                                        plane_req->src_x, plane_req->src_y,
-                                        plane_req->src_w, plane_req->src_h);
+                                        crtc_x, crtc_y, crtc_w, crtc_h,
+                                        src_x, src_y, src_w, src_h);
        if (!ret) {
                plane->crtc = crtc;
                plane->fb = fb;
@@ -2320,6 +2280,85 @@ out:
                drm_framebuffer_unreference(old_fb);
 
        return ret;
+
+}
+
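
setplane_internal takes src_{x,y,w,h} in 16.16 fixed point, and the debug message above turns the fractional part into millionths via ((v & 0xffff) * 15625) >> 10, which is the same as frac * 1000000 / 65536. A standalone sketch of that conversion, for illustration only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack an integer part plus a fractional part (in 1/65536ths) into 16.16 fixed point. */
static uint32_t to_fixed_16_16(uint32_t whole, uint32_t frac_65536ths)
{
        return (whole << 16) | (frac_65536ths & 0xffff);
}

int main(void)
{
        uint32_t src_w = to_fixed_16_16(1920, 0x8000);  /* 1920.5 in 16.16 */

        /* Same conversion as the DRM_DEBUG_KMS format above: 15625/1024 == 1000000/65536. */
        uint32_t whole = src_w >> 16;
        uint32_t micros = ((src_w & 0xffff) * 15625) >> 10;

        assert(whole == 1920 && micros == 500000);
        printf("src_w = %u.%06u\n", whole, micros);
        return 0;
}
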
+/**
+ * drm_mode_setplane - configure a plane's configuration
+ * @dev: DRM device
+ * @data: ioctl data*
+ * @file_priv: DRM file info
+ *
+ * Set plane configuration, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable (planes may be disabled without providing a
+ * valid crtc).
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv)
+{
+       struct drm_mode_set_plane *plane_req = data;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       struct drm_crtc *crtc = NULL;
+       struct drm_framebuffer *fb = NULL;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       /* Give drivers some help against integer overflows */
+       if (plane_req->crtc_w > INT_MAX ||
+           plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+           plane_req->crtc_h > INT_MAX ||
+           plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+                             plane_req->crtc_w, plane_req->crtc_h,
+                             plane_req->crtc_x, plane_req->crtc_y);
+               return -ERANGE;
+       }
+
+       /*
+        * First, find the plane, crtc, and fb objects.  If not available,
+        * we don't bother to call the driver.
+        */
+       obj = drm_mode_object_find(dev, plane_req->plane_id,
+                                  DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown plane ID %d\n",
+                             plane_req->plane_id);
+               return -ENOENT;
+       }
+       plane = obj_to_plane(obj);
+
+       if (plane_req->fb_id) {
+               fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+               if (!fb) {
+                       DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+                                     plane_req->fb_id);
+                       return -ENOENT;
+               }
+
+               obj = drm_mode_object_find(dev, plane_req->crtc_id,
+                                          DRM_MODE_OBJECT_CRTC);
+               if (!obj) {
+                       DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+                                     plane_req->crtc_id);
+                       return -ENOENT;
+               }
+               crtc = obj_to_crtc(obj);
+       }
+
+       /*
+        * setplane_internal will take care of deref'ing either the old or new
+        * framebuffer depending on success.
+        */
+       return setplane_internal(plane, crtc, fb,
+                                plane_req->crtc_x, plane_req->crtc_y,
+                                plane_req->crtc_w, plane_req->crtc_h,
+                                plane_req->src_x, plane_req->src_y,
+                                plane_req->src_w, plane_req->src_h);
 }
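
The crtc coordinate check that drm_mode_setplane now performs up front ("Give drivers some help against integer overflows") guarantees that crtc_x + crtc_w and crtc_y + crtc_h still fit in a signed int, so drivers can compute a plane's right and bottom edges safely. A minimal standalone illustration of that guard (the helper name is hypothetical):

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the check in drm_mode_setplane: reject requests whose signed
 * position plus unsigned size would overflow an int. */
static bool crtc_coords_ok(int32_t crtc_x, uint32_t crtc_w)
{
        return crtc_w <= INT_MAX && crtc_x <= INT_MAX - (int32_t)crtc_w;
}

int main(void)
{
        printf("%d\n", crtc_coords_ok(100, 1920));           /* 1: right edge fits */
        printf("%d\n", crtc_coords_ok(INT_MAX - 10, 1920));  /* 0: would overflow */
        return 0;
}
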
 
 /**
@@ -2568,6 +2607,102 @@ out:
        return ret;
 }
 
+/**
+ * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
+ *     universal plane handler call
+ * @crtc: crtc to update cursor for
+ * @req: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Legacy cursor ioctl's work directly with driver buffer handles.  To
+ * translate legacy ioctl calls into universal plane handler calls, we need to
+ * wrap the native buffer handle in a drm_framebuffer.
+ *
+ * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
+ * buffer with a pitch of 4*width; the universal plane interface should be used
+ * directly in cases where the hardware can support other buffer settings and
+ * userspace wants to make use of these capabilities.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+static int drm_mode_cursor_universal(struct drm_crtc *crtc,
+                                    struct drm_mode_cursor2 *req,
+                                    struct drm_file *file_priv)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_mode_fb_cmd2 fbreq = {
+               .width = req->width,
+               .height = req->height,
+               .pixel_format = DRM_FORMAT_ARGB8888,
+               .pitches = { req->width * 4 },
+               .handles = { req->handle },
+       };
+       int32_t crtc_x, crtc_y;
+       uint32_t crtc_w = 0, crtc_h = 0;
+       uint32_t src_w = 0, src_h = 0;
+       int ret = 0;
+
+       BUG_ON(!crtc->cursor);
+
+       /*
+        * Obtain fb we'll be using (either new or existing) and take an extra
+        * reference to it if fb != null.  setplane will take care of dropping
+        * the reference if the plane update fails.
+        */
+       if (req->flags & DRM_MODE_CURSOR_BO) {
+               if (req->handle) {
+                       fb = add_framebuffer_internal(dev, &fbreq, file_priv);
+                       if (IS_ERR(fb)) {
+                               DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+                               return PTR_ERR(fb);
+                       }
+
+                       drm_framebuffer_reference(fb);
+               } else {
+                       fb = NULL;
+               }
+       } else {
+               mutex_lock(&dev->mode_config.mutex);
+               fb = crtc->cursor->fb;
+               if (fb)
+                       drm_framebuffer_reference(fb);
+               mutex_unlock(&dev->mode_config.mutex);
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_MOVE) {
+               crtc_x = req->x;
+               crtc_y = req->y;
+       } else {
+               crtc_x = crtc->cursor_x;
+               crtc_y = crtc->cursor_y;
+       }
+
+       if (fb) {
+               crtc_w = fb->width;
+               crtc_h = fb->height;
+               src_w = fb->width << 16;
+               src_h = fb->height << 16;
+       }
+
+       /*
+        * setplane_internal will take care of deref'ing either the old or new
+        * framebuffer depending on success.
+        */
+       ret = setplane_internal(crtc->cursor, crtc, fb,
+                               crtc_x, crtc_y, crtc_w, crtc_h,
+                               0, 0, src_w, src_h);
+
+       /* Update successful; save new cursor position, if necessary */
+       if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
+               crtc->cursor_x = req->x;
+               crtc->cursor_y = req->y;
+       }
+
+       return ret;
+}
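
As the kerneldoc above notes, the legacy cursor path assumes a 32-bit ARGB buffer with a pitch of 4 * width and hands setplane_internal full-surface source coordinates in 16.16 format. A small standalone sketch of the implied geometry for a classic 64x64 cursor (illustrative values only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t width = 64, height = 64;

        /* Legacy cursor ioctls imply a 32bpp ARGB buffer: 4 bytes per pixel. */
        uint32_t pitch = width * 4;       /* matches .pitches = { req->width * 4 } */
        uint32_t size  = pitch * height;  /* bytes the handle must cover */

        /* setplane is then called with crtc_w/h in pixels and src_w/h in 16.16. */
        uint32_t src_w = width << 16, src_h = height << 16;

        assert(pitch == 256 && size == 16384);
        printf("pitch=%u size=%u src_w=0x%x src_h=0x%x\n", pitch, size, src_w, src_h);
        return 0;
}
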
+
 static int drm_mode_cursor_common(struct drm_device *dev,
                                  struct drm_mode_cursor2 *req,
                                  struct drm_file *file_priv)
@@ -2587,6 +2722,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
                return -ENOENT;
        }
 
+       /*
+        * If this crtc has a universal cursor plane, call that plane's update
+        * handler rather than using legacy cursor handlers.
+        */
+       if (crtc->cursor)
+               return drm_mode_cursor_universal(crtc, req, file_priv);
+
        drm_modeset_lock(&crtc->mutex, NULL);
        if (req->flags & DRM_MODE_CURSOR_BO) {
                if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
@@ -2886,56 +3028,38 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
        return 0;
 }
 
-/**
- * drm_mode_addfb2 - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the specified CRTC, given a user request with format. This is
- * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
- * and uses fourcc codes as pixel format specifiers.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, errno on failure.
- */
-int drm_mode_addfb2(struct drm_device *dev,
-                   void *data, struct drm_file *file_priv)
+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+                                                       struct drm_mode_fb_cmd2 *r,
+                                                       struct drm_file *file_priv)
 {
-       struct drm_mode_fb_cmd2 *r = data;
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_framebuffer *fb;
        int ret;
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        if (r->flags & ~DRM_MODE_FB_INTERLACED) {
                DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
 
        if ((config->min_width > r->width) || (r->width > config->max_width)) {
                DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
                          r->width, config->min_width, config->max_width);
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
        if ((config->min_height > r->height) || (r->height > config->max_height)) {
                DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
                          r->height, config->min_height, config->max_height);
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
 
        ret = framebuffer_check(r);
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
 
        fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("could not create framebuffer\n");
-               return PTR_ERR(fb);
+               return fb;
        }
 
        mutex_lock(&file_priv->fbs_lock);
@@ -2944,8 +3068,37 @@ int drm_mode_addfb2(struct drm_device *dev,
        DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
        mutex_unlock(&file_priv->fbs_lock);
 
+       return fb;
+}
 
-       return ret;
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+                   void *data, struct drm_file *file_priv)
+{
+       struct drm_framebuffer *fb;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       fb = add_framebuffer_internal(dev, data, file_priv);
+       if (IS_ERR(fb))
+               return PTR_ERR(fb);
+
+       return 0;
 }
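
add_framebuffer_internal now reports failures with the kernel's ERR_PTR convention so that drm_mode_addfb2 and the new cursor path can share it. A minimal userspace re-implementation of that idiom, purely to illustrate the pattern (the real helpers live in include/linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified versions of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers:
 * small negative errnos are stashed at the top of the pointer range. */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct framebuffer { int id; };

static struct framebuffer *create_fb(int width)
{
        static struct framebuffer fb = { .id = 42 };

        if (width <= 0)
                return ERR_PTR(-22);  /* -EINVAL, as in the hunk above */
        return &fb;
}

int main(void)
{
        struct framebuffer *fb = create_fb(0);

        if (IS_ERR(fb))
                printf("create failed: %ld\n", PTR_ERR(fb));  /* prints -22 */

        fb = create_fb(1920);
        if (!IS_ERR(fb))
                printf("fb id %d\n", fb->id);
        return 0;
}
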
 
 /**
drivers/gpu/drm/i915/i915_cmd_parser.c
index 9d7954366bd28ea9300ddfe321219f35f8c9726e..dea99d92fb4a195784c288f3fbc236c00a8188c2 100644
@@ -426,6 +426,9 @@ static const u32 gen7_render_regs[] = {
        GEN7_SO_WRITE_OFFSET(1),
        GEN7_SO_WRITE_OFFSET(2),
        GEN7_SO_WRITE_OFFSET(3),
+       GEN7_L3SQCREG1,
+       GEN7_L3CNTLREG2,
+       GEN7_L3CNTLREG3,
 };
 
 static const u32 gen7_blt_regs[] = {
drivers/gpu/drm/i915/i915_debugfs.c
index b8c689202c4041c4e22dc63d7415ffb2232e94a9..a93b3bfdad6145f953c9c792922652f25cf949be 100644
@@ -170,6 +170,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        }
        if (obj->ring != NULL)
                seq_printf(m, " (%s)", obj->ring->name);
+       if (obj->frontbuffer_bits)
+               seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
 
 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
@@ -515,6 +517,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        unsigned long flags;
        struct intel_crtc *crtc;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        for_each_intel_crtc(dev, crtc) {
                const char pipe = pipe_name(crtc->pipe);
@@ -556,6 +563,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }
 
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
@@ -1029,7 +1038,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-       } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+       } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
+                  IS_BROADWELL(dev)) {
                u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1048,7 +1058,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 
                reqf = I915_READ(GEN6_RPNSWREQ);
                reqf &= ~GEN6_TURBO_DISABLE;
-               if (IS_HASWELL(dev))
+               if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                        reqf >>= 24;
                else
                        reqf >>= 25;
@@ -1065,7 +1075,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
-               if (IS_HASWELL(dev))
+               if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                        cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
                else
                        cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1677,9 +1687,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 
 #ifdef CONFIG_DRM_I915_FBDEV
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
-       if (ret)
-               return ret;
 
        ifbdev = dev_priv->fbdev;
        fb = to_intel_framebuffer(ifbdev->helper.fb);
@@ -1692,7 +1699,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                   atomic_read(&fb->base.refcount.refcount));
        describe_obj(m, fb->obj);
        seq_putc(m, '\n');
-       mutex_unlock(&dev->mode_config.mutex);
 #endif
 
        mutex_lock(&dev->mode_config.fb_lock);
@@ -1723,7 +1729,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
        struct intel_context *ctx;
        int ret, i;
 
-       ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
 
@@ -1753,7 +1759,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
                seq_putc(m, '\n');
        }
 
-       mutex_unlock(&dev->mode_config.mutex);
+       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
@@ -1978,10 +1984,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
        seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
        seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
+       seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
+       seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
 
        enabled = HAS_PSR(dev) &&
                I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
-       seq_printf(m, "Enabled: %s\n", yesno(enabled));
+       seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
 
        if (HAS_PSR(dev))
                psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
@@ -2223,9 +2231,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
        struct drm_crtc *crtc = &intel_crtc->base;
        struct intel_encoder *intel_encoder;
 
-       seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
-                  crtc->primary->fb->base.id, crtc->x, crtc->y,
-                  crtc->primary->fb->width, crtc->primary->fb->height);
+       if (crtc->primary->fb)
+               seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+                          crtc->primary->fb->base.id, crtc->x, crtc->y,
+                          crtc->primary->fb->width, crtc->primary->fb->height);
+       else
+               seq_puts(m, "\tprimary plane disabled\n");
        for_each_encoder_on_crtc(dev, crtc, intel_encoder)
                intel_encoder_info(m, intel_crtc, intel_encoder);
 }
@@ -2929,11 +2940,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
        /* real source -> none transition */
        if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
                struct intel_pipe_crc_entry *entries;
+               struct intel_crtc *crtc =
+                       to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
                DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
                                 pipe_name(pipe));
 
-               intel_wait_for_vblank(dev, pipe);
+               drm_modeset_lock(&crtc->base.mutex, NULL);
+               if (crtc->active)
+                       intel_wait_for_vblank(dev, pipe);
+               drm_modeset_unlock(&crtc->base.mutex);
 
                spin_lock_irq(&pipe_crc->lock);
                entries = pipe_crc->entries;
@@ -3506,7 +3522,7 @@ i915_max_freq_get(void *data, u64 *val)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+       if (INTEL_INFO(dev)->gen < 6)
                return -ENODEV;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3532,7 +3548,7 @@ i915_max_freq_set(void *data, u64 val)
        u32 rp_state_cap, hw_max, hw_min;
        int ret;
 
-       if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+       if (INTEL_INFO(dev)->gen < 6)
                return -ENODEV;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3587,7 +3603,7 @@ i915_min_freq_get(void *data, u64 *val)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+       if (INTEL_INFO(dev)->gen < 6)
                return -ENODEV;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3613,7 +3629,7 @@ i915_min_freq_set(void *data, u64 val)
        u32 rp_state_cap, hw_max, hw_min;
        int ret;
 
-       if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+       if (INTEL_INFO(dev)->gen < 6)
                return -ENODEV;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
drivers/gpu/drm/i915/i915_dma.c
index cac9265f9757f12d967e63a4f191f3a9dfa6b259..84b55665bd8722d21643b90de95e5ba92762b5cc 100644
@@ -138,7 +138,7 @@ static void i915_free_hws(struct drm_device *dev)
        I915_WRITE(HWS_PGA, 0x1ffff000);
 }
 
-void i915_kernel_lost_context(struct drm_device * dev)
+void i915_kernel_lost_context(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
@@ -166,7 +166,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
-static int i915_dma_cleanup(struct drm_device * dev)
+static int i915_dma_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
@@ -190,7 +190,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
        return 0;
 }
 
-static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
        return 0;
 }
 
-static int i915_dma_resume(struct drm_device * dev)
+static int i915_dma_resume(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = LP_RING(dev_priv);
@@ -359,7 +359,7 @@ static int validate_cmd(int cmd)
        return 0;
 }
 
-static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i, ret;
@@ -369,6 +369,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 
        for (i = 0; i < dwords;) {
                int sz = validate_cmd(buffer[i]);
+
                if (sz == 0 || i + sz > dwords)
                        return -EINVAL;
                i += sz;
@@ -453,7 +454,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
        }
 }
 
-static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+static int i915_dispatch_cmdbuffer(struct drm_device *dev,
                                   drm_i915_cmdbuffer_t *cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
@@ -487,8 +488,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
        return 0;
 }
 
-static int i915_dispatch_batchbuffer(struct drm_device * dev,
-                                    drm_i915_batchbuffer_t * batch,
+static int i915_dispatch_batchbuffer(struct drm_device *dev,
+                                    drm_i915_batchbuffer_t *batch,
                                     struct drm_clip_rect *cliprects)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -549,7 +550,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
        return 0;
 }
 
-static int i915_dispatch_flip(struct drm_device * dev)
+static int i915_dispatch_flip(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
@@ -755,7 +756,7 @@ fail_batch_free:
        return ret;
 }
 
-static int i915_emit_irq(struct drm_device * dev)
+static int i915_emit_irq(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -781,7 +782,7 @@ static int i915_emit_irq(struct drm_device * dev)
        return dev_priv->dri1.counter;
 }
 
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+static int i915_wait_irq(struct drm_device *dev, int irq_nr)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -1266,6 +1267,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1488,10 +1490,11 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 #define SEP_EMPTY
 #define PRINT_FLAG(name) info->name ? #name "," : ""
 #define SEP_COMMA ,
-       DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+       DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
                         DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
                         info->gen,
                         dev_priv->dev->pdev->device,
+                        dev_priv->dev->pdev->revision,
                         DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
 #undef PRINT_S
 #undef SEP_EMPTY
@@ -1602,6 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        spin_lock_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
+       spin_lock_init(&dev_priv->mmio_flip_lock);
        mutex_init(&dev_priv->dpio_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
 
@@ -1929,7 +1933,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
  * and DMA structures, since the kernel won't be using them, and clea
  * up any GEM state.
  */
-void i915_driver_lastclose(struct drm_device * dev)
+void i915_driver_lastclose(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1950,7 +1954,7 @@ void i915_driver_lastclose(struct drm_device * dev)
        i915_dma_cleanup(dev);
 }
 
-void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
 {
        mutex_lock(&dev->struct_mutex);
        i915_gem_context_close(dev, file_priv);
@@ -2027,7 +2031,7 @@ int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
  * manage the gtt, we need to claim that all intel devices are agp.  For
  * otherwise the drm core refuses to initialize the agp support code.
  */
-int i915_driver_device_is_agp(struct drm_device * dev)
+int i915_driver_device_is_agp(struct drm_device *dev)
 {
        return 1;
 }
drivers/gpu/drm/i915/i915_drv.c
index d935ab3718e18071b0394965540cb412dd60e028..b0955fffca982c5293786f9da146dab1e416d5f9 100644
@@ -28,6 +28,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/acpi.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -46,8 +47,6 @@ static struct drm_driver driver;
                          PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
-       .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
-       .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
 
 #define GEN_CHV_PIPEOFFSETS \
@@ -55,10 +54,6 @@ static struct drm_driver driver;
                          CHV_PIPE_C_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           CHV_TRANSCODER_C_OFFSET, }, \
-       .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
-                         CHV_DPLL_C_OFFSET }, \
-       .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
-                            CHV_DPLL_C_MD_OFFSET }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
                             CHV_PALETTE_C_OFFSET }
 
@@ -499,8 +494,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
-
-       intel_runtime_pm_get(dev_priv);
+       pci_power_t opregion_target_state;
 
        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
@@ -526,9 +520,9 @@ static int i915_drm_freeze(struct drm_device *dev)
                        return error;
                }
 
-               drm_irq_uninstall(dev);
+               intel_runtime_pm_disable_interrupts(dev);
 
-               intel_disable_gt_powersave(dev);
+               intel_suspend_gt_powersave(dev);
 
                /*
                 * Disable CRTCs directly since we want to preserve sw state
@@ -547,8 +541,14 @@ static int i915_drm_freeze(struct drm_device *dev)
 
        i915_save_state(dev);
 
+       if (acpi_target_system_state() >= ACPI_STATE_S3)
+               opregion_target_state = PCI_D3cold;
+       else
+               opregion_target_state = PCI_D1;
+       intel_opregion_notify_adapter(dev, opregion_target_state);
+
+       intel_uncore_forcewake_reset(dev, false);
        intel_opregion_fini(dev);
-       intel_uncore_fini(dev);
 
        console_lock();
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
@@ -556,6 +556,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 
        dev_priv->suspend_count++;
 
+       intel_display_set_init_power(dev_priv, false);
+
        return 0;
 }
 
@@ -605,7 +607,10 @@ static int i915_drm_thaw_early(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       intel_uncore_early_sanitize(dev);
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               hsw_disable_pc8(dev_priv);
+
+       intel_uncore_early_sanitize(dev, true);
        intel_uncore_sanitize(dev);
        intel_power_domains_init_hw(dev_priv);
 
@@ -638,8 +643,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
                }
                mutex_unlock(&dev->struct_mutex);
 
-               /* We need working interrupts for modeset enabling ... */
-               drm_irq_install(dev, dev->pdev->irq);
+               intel_runtime_pm_restore_interrupts(dev);
 
                intel_modeset_init_hw(dev);
 
@@ -676,7 +680,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_opregion_notify_adapter(dev, PCI_D0);
+
        return 0;
 }
 
@@ -885,6 +890,7 @@ static int i915_pm_suspend_late(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       struct drm_i915_private *dev_priv = drm_dev->dev_private;
 
        /*
         * We have a suspedn ordering issue with the snd-hda driver also
@@ -898,6 +904,9 @@ static int i915_pm_suspend_late(struct device *dev)
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
+       if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
+               hsw_enable_pc8(dev_priv);
+
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
 
drivers/gpu/drm/i915/i915_drv.h
index 5484f052d50c205da6da28cb3bf621e911696363..6a1e990cb4829c60857d0f475e1e3a7c84c96496 100644
@@ -53,7 +53,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20080730"
+#define DRIVER_DATE            "20140620"
 
 enum pipe {
        INVALID_PIPE = -1,
@@ -552,8 +552,6 @@ struct intel_device_info {
        /* Register offsets for the various display pipes and transcoders */
        int pipe_offsets[I915_MAX_TRANSCODERS];
        int trans_offsets[I915_MAX_TRANSCODERS];
-       int dpll_offsets[I915_MAX_PIPES];
-       int dpll_md_offsets[I915_MAX_PIPES];
        int palette_offsets[I915_MAX_PIPES];
        int cursor_offsets[I915_MAX_PIPES];
 };
@@ -593,7 +591,6 @@ struct intel_context {
        bool is_initialized;
        uint8_t remap_slice;
        struct drm_i915_file_private *file_priv;
-       struct intel_engine_cs *last_ring;
        struct drm_i915_gem_object *obj;
        struct i915_ctx_hang_stats hang_stats;
        struct i915_address_space *vm;
@@ -638,6 +635,10 @@ struct i915_drrs {
 struct i915_psr {
        bool sink_support;
        bool source_ok;
+       bool setup_done;
+       bool enabled;
+       bool active;
+       struct delayed_work work;
 };
 
 enum intel_pch {
@@ -1331,6 +1332,17 @@ struct intel_pipe_crc {
        wait_queue_head_t wq;
 };
 
+struct i915_frontbuffer_tracking {
+       struct mutex lock;
+
+       /*
+        * Tracking bits for delayed frontbuffer flushing du to gpu activity or
+        * scheduled flips.
+        */
+       unsigned busy_bits;
+       unsigned flip_bits;
+};
+
 struct drm_i915_private {
        struct drm_device *dev;
        struct kmem_cache *slab;
@@ -1370,6 +1382,9 @@ struct drm_i915_private {
        /* protects the irq masks */
        spinlock_t irq_lock;
 
+       /* protects the mmio flip data */
+       spinlock_t mmio_flip_lock;
+
        bool display_irqs_enabled;
 
        /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -1473,6 +1488,9 @@ struct drm_i915_private {
        bool lvds_downclock_avail;
        /* indicates the reduced downclock for LVDS*/
        int lvds_downclock;
+
+       struct i915_frontbuffer_tracking fb_tracking;
+
        u16 orig_clock;
 
        bool mchbar_need_disable;
@@ -1590,6 +1608,28 @@ struct drm_i915_gem_object_ops {
        void (*release)(struct drm_i915_gem_object *);
 };
 
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-vise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
+#define INTEL_FRONTBUFFER_BITS \
+       (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
+#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
+       (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER_CURSOR(pipe) \
+       (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_SPRITE(pipe) \
+       (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+       (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+       (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+
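
The comment above defines the frontbuffer bit layout: four bits per pipe, one per scanout plane type. A standalone sketch that reproduces those macros and prints the resulting masks (I915_MAX_PIPES is assumed to be 3 here, enough for pipes A-C):

#include <assert.h>
#include <stdio.h>

#define I915_MAX_PIPES 3  /* assumption for the sketch */

#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_PRIMARY(pipe) (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe)  (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe)  (1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) (1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))

int main(void)
{
        /* Pipe B (index 1) owns bits 4-7: primary=0x10, cursor=0x20, sprite=0x40, overlay=0x80. */
        assert(INTEL_FRONTBUFFER_PRIMARY(1) == 0x10);
        assert(INTEL_FRONTBUFFER_ALL_MASK(1) == 0xf0);

        for (int pipe = 0; pipe < I915_MAX_PIPES; pipe++)
                printf("pipe %d: primary=0x%03x cursor=0x%03x sprite=0x%03x overlay=0x%03x\n",
                       pipe,
                       INTEL_FRONTBUFFER_PRIMARY(pipe),
                       INTEL_FRONTBUFFER_CURSOR(pipe),
                       INTEL_FRONTBUFFER_SPRITE(pipe),
                       INTEL_FRONTBUFFER_OVERLAY(pipe));
        return 0;
}
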
 struct drm_i915_gem_object {
        struct drm_gem_object base;
 
@@ -1659,6 +1699,12 @@ struct drm_i915_gem_object {
        unsigned int pin_mappable:1;
        unsigned int pin_display:1;
 
+       /*
+        * Is the object to be mapped as read-only to the GPU
+        * Only honoured if hardware has relevant pte bit
+        */
+       unsigned long gt_ro:1;
+
        /*
         * Is the GPU currently using a fence to access this buffer,
         */
@@ -1671,6 +1717,8 @@ struct drm_i915_gem_object {
        unsigned int has_global_gtt_mapping:1;
        unsigned int has_dma_mapping:1;
 
+       unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+
        struct sg_table *pages;
        int pages_pin_count;
 
@@ -1717,6 +1765,10 @@ struct drm_i915_gem_object {
 };
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+                      struct drm_i915_gem_object *new,
+                      unsigned frontbuffer_bits);
+
 /**
  * Request queue structure.
  *
@@ -1938,10 +1990,8 @@ struct drm_i915_cmd_table {
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev)        (INTEL_INFO(dev)->gen >= 6 && \
-                                (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
-#define HAS_PPGTT(dev)         (INTEL_INFO(dev)->gen >= 7 \
-                                && !IS_GEN8(dev))
+#define HAS_ALIASING_PPGTT(dev)        (INTEL_INFO(dev)->gen >= 6)
+#define HAS_PPGTT(dev)         (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
 #define USES_PPGTT(dev)                intel_enable_ppgtt(dev, false)
 #define USES_FULL_PPGTT(dev)   intel_enable_ppgtt(dev, true)
 
@@ -2038,6 +2088,7 @@ struct i915_params {
        bool reset;
        bool disable_display;
        bool disable_vtd_wa;
+       int use_mmio_flip;
 };
 extern struct i915_params i915 __read_mostly;
 
@@ -2082,10 +2133,12 @@ extern void intel_irq_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev,
+                                       bool restore_forcewake);
 extern void intel_uncore_init(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
 extern void intel_uncore_fini(struct drm_device *dev);
+extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
 
 void
 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2233,6 +2286,8 @@ bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
+int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
        return unlikely(atomic_read(&error->reset_counter)
@@ -2443,7 +2498,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size);
-void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -2603,6 +2657,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file);
 
+void intel_notify_mmio_flip(struct intel_engine_cs *ring);
+
 /* overlay */
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
drivers/gpu/drm/i915/i915_gem.c
index d86b77e905a2b9dc484635ac69719047e4191561..f6d123828926f6966487389322863f35a1b7dda5 100644
@@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
  * Compare seqno against outstanding lazy request. Emit a request if they are
  * equal.
  */
-static int
+int
 i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
 {
        int ret;
@@ -1561,14 +1561,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (ret)
                goto unpin;
 
-       obj->fault_mappable = true;
-
+       /* Finally, remap it using the new GTT offset */
        pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
        pfn >>= PAGE_SHIFT;
-       pfn += page_offset;
 
-       /* Finally, remap it using the new GTT offset */
-       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+       if (!obj->fault_mappable) {
+               unsigned long size = min_t(unsigned long,
+                                          vma->vm_end - vma->vm_start,
+                                          obj->base.size);
+               int i;
+
+               for (i = 0; i < size >> PAGE_SHIFT; i++) {
+                       ret = vm_insert_pfn(vma,
+                                           (unsigned long)vma->vm_start + i * PAGE_SIZE,
+                                           pfn + i);
+                       if (ret)
+                               break;
+               }
+
+               obj->fault_mappable = true;
+       } else
+               ret = vm_insert_pfn(vma,
+                                   (unsigned long)vmf->virtual_address,
+                                   pfn + page_offset);
 unpin:
        i915_gem_object_ggtt_unpin(obj);
 unlock:
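
The i915_gem_fault change above implements the "prefault gtt mappings" item from the tag description: on the first fault it maps the whole span covered by both the VMA and the object, rather than a single page. A standalone sketch of just the page-count arithmetic (values are illustrative):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned long vma_span = 16 * PAGE_SIZE;  /* vm_end - vm_start */
        unsigned long obj_size = 10 * PAGE_SIZE;  /* obj->base.size */

        /* min_t(unsigned long, vma_span, obj_size) >> PAGE_SHIFT, as in the fault handler */
        unsigned long size = vma_span < obj_size ? vma_span : obj_size;
        unsigned long npages = size >> PAGE_SHIFT;

        assert(npages == 10);  /* one vm_insert_pfn() call per page on first fault */
        printf("prefaulting %lu pages\n", npages);
        return 0;
}
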
@@ -1616,22 +1631,6 @@ out:
        return ret;
 }
 
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
-       struct i915_vma *vma;
-
-       /*
-        * Only the global gtt is relevant for gtt memory mappings, so restrict
-        * list traversal to objects bound into the global address space. Note
-        * that the active list should be empty, but better safe than sorry.
-        */
-       WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
-       list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-       list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-}
-
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -1657,6 +1656,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        obj->fault_mappable = false;
 }
 
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       struct drm_i915_gem_object *obj;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+               i915_gem_release_mmap(obj);
+}
+
 uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
@@ -2211,6 +2219,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
                        list_move_tail(&vma->mm_list, &vm->inactive_list);
        }
 
+       intel_fb_obj_flush(obj, true);
+
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
 
@@ -3540,6 +3550,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
+       intel_fb_obj_flush(obj, false);
+
        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
                                            old_write_domain);
@@ -3561,6 +3573,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
+       intel_fb_obj_flush(obj, false);
+
        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
                                            old_write_domain);
@@ -3614,6 +3628,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
                obj->dirty = 1;
        }
 
+       if (write)
+               intel_fb_obj_invalidate(obj, NULL);
+
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);
@@ -3950,6 +3967,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
                obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
 
+       if (write)
+               intel_fb_obj_invalidate(obj, NULL);
+
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);
@@ -4438,13 +4458,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        if (obj->stolen)
                i915_gem_object_unpin_pages(obj);
 
+       WARN_ON(obj->frontbuffer_bits);
+
        if (WARN_ON(obj->pages_pin_count))
                obj->pages_pin_count = 0;
        if (discard_backing_storage(obj))
                obj->madv = I915_MADV_DONTNEED;
        i915_gem_object_put_pages(obj);
        i915_gem_object_free_mmap_offset(obj);
-       i915_gem_object_release_stolen(obj);
 
        BUG_ON(obj->pages);
 
@@ -4922,6 +4943,8 @@ i915_gem_load(struct drm_device *dev)
 
        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        register_oom_notifier(&dev_priv->mm.oom_notifier);
+
+       mutex_init(&dev_priv->fb_tracking.lock);
 }
 
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
@@ -4983,6 +5006,23 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        return ret;
 }
 
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+                      struct drm_i915_gem_object *new,
+                      unsigned frontbuffer_bits)
+{
+       if (old) {
+               WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
+               WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
+               old->frontbuffer_bits &= ~frontbuffer_bits;
+       }
+
+       if (new) {
+               WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
+               WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
+               new->frontbuffer_bits |= frontbuffer_bits;
+       }
+}
+
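
i915_gem_track_fb moves a set of frontbuffer bits from the old scanout object to the new one, e.g. across a page flip, warning if the old object never owned the bits or the new one already does. A standalone sketch of that bookkeeping, with the kernel-only pieces (struct_mutex assertions, WARN_ON) reduced to asserts; the flip scenario is illustrative:

#include <assert.h>
#include <stdio.h>

struct gem_object { unsigned frontbuffer_bits; };

/* Same bit transfer as i915_gem_track_fb(), minus the locking checks. */
static void track_fb(struct gem_object *old, struct gem_object *new, unsigned bits)
{
        if (old) {
                assert(old->frontbuffer_bits & bits);    /* must currently own the bits */
                old->frontbuffer_bits &= ~bits;
        }
        if (new) {
                assert(!(new->frontbuffer_bits & bits)); /* must not own them yet */
                new->frontbuffer_bits |= bits;
        }
}

int main(void)
{
        unsigned primary_pipe_a = 0x1;  /* INTEL_FRONTBUFFER_PRIMARY(0) from i915_drv.h */
        struct gem_object old_fb = { .frontbuffer_bits = primary_pipe_a };
        struct gem_object new_fb = { 0 };

        track_fb(&old_fb, &new_fb, primary_pipe_a);  /* page flip: ownership moves */
        printf("old=0x%x new=0x%x\n", old_fb.frontbuffer_bits, new_fb.frontbuffer_bits);
        assert(old_fb.frontbuffer_bits == 0 && new_fb.frontbuffer_bits == primary_pipe_a);
        return 0;
}
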
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 {
        if (!mutex_is_locked(mutex))
@@ -5065,12 +5105,13 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
            vm == &dev_priv->mm.aliasing_ppgtt->base)
                vm = &dev_priv->gtt.base;
 
-       BUG_ON(list_empty(&o->vma_list));
        list_for_each_entry(vma, &o->vma_list, vma_link) {
                if (vma->vm == vm)
                        return vma->node.start;
 
        }
+       WARN(1, "%s vma for this object not found.\n",
+            i915_is_ggtt(vm) ? "global" : "ppgtt");
        return -1;
 }
 
drivers/gpu/drm/i915/i915_gem_context.c
index a5ddf3bce9c3fe06e3338b8b1c15e86405f312df..0d2c75bde96ecb463cff3109b1dc65d04802853a 100644
@@ -606,7 +606,7 @@ static int do_switch(struct intel_engine_cs *ring,
                BUG_ON(!i915_gem_obj_is_pinned(from->obj));
        }
 
-       if (from == to && from->last_ring == ring && !to->remap_slice)
+       if (from == to && !to->remap_slice)
                return 0;
 
        /* Trying to pin first makes error handling easier. */
@@ -703,7 +703,6 @@ static int do_switch(struct intel_engine_cs *ring,
 done:
        i915_gem_context_reference(to);
        ring->last_context = to;
-       to->last_ring = ring;
 
        if (uninitialized) {
                ret = i915_gem_render_state_init(ring);
index 3a30133f93e858a449366727c266a4093485e085..d815ef51a5eac9fb91587b73d0bb63b32f026c9b 100644 (file)
@@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);
-                       /* check for potential scanout */
-                       if (i915_gem_obj_ggtt_bound(obj) &&
-                           i915_gem_obj_to_ggtt(obj)->pin_count)
-                               intel_mark_fb_busy(obj, ring);
+
+                       intel_fb_obj_invalidate(obj, ring);
 
                        /* update for the implicit flush after a batch */
                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1525,7 +1523,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
-               struct drm_i915_gem_exec_object2 *user_exec_list =
+               struct drm_i915_gem_exec_object2 __user *user_exec_list =
                                   to_user_ptr(args->buffers_ptr);
                int i;
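
The only change in this last hunk is the __user annotation: it tells sparse (make C=1) that user_exec_list points into the user address space, so direct dereferences are flagged and the data must go through the copy helpers. A hedged illustration of how such a pointer is consumed (not the exact surrounding code, just the usual copy_to_user() pattern):

/* Sketch: write one offset back to userspace; a plain dereference of
 * user_exec_list[i] would now be reported by sparse. */
if (copy_to_user(&user_exec_list[i].offset,
                 &exec2_list[i].offset,
                 sizeof(user_exec_list[i].offset)))
        ret = -EFAULT;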
 
index 8b3cde7033640e2450ab0b911c5cedb39172a003..a4153eef48c20c02f444e53b4de144e5414987fc 100644 (file)
@@ -63,6 +63,12 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
        }
 #endif
 
+       /* Early VLV doesn't have this */
+       if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
+               DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
+               return 0;
+       }
+
        return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
 }
 
@@ -110,7 +116,7 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
 
 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level,
-                                    bool valid)
+                                    bool valid, u32 unused)
 {
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -132,7 +138,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
 
 static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level,
-                                    bool valid)
+                                    bool valid, u32 unused)
 {
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -156,7 +162,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
 
 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level,
-                                    bool valid)
+                                    bool valid, u32 flags)
 {
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -164,7 +170,8 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
        /* Mark the page as writeable.  Other platforms don't have a
         * setting for read-only/writable, so this matches that behavior.
         */
-       pte |= BYT_PTE_WRITEABLE;
+       if (!(flags & PTE_READ_ONLY))
+               pte |= BYT_PTE_WRITEABLE;
 
        if (level != I915_CACHE_NONE)
                pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
@@ -174,7 +181,7 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
 
 static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level,
-                                    bool valid)
+                                    bool valid, u32 unused)
 {
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -187,7 +194,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
 
 static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
                                      enum i915_cache_level level,
-                                     bool valid)
+                                     bool valid, u32 unused)
 {
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -301,7 +308,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
                                      uint64_t start,
-                                     enum i915_cache_level cache_level)
+                                     enum i915_cache_level cache_level, u32 unused)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
@@ -639,7 +646,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
        uint32_t pd_entry;
        int pte, pde;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
        pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
                ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
@@ -941,7 +948,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
        while (num_entries) {
                last_pte = first_pte + num_entries;
@@ -964,7 +971,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
                                      uint64_t start,
-                                     enum i915_cache_level cache_level)
+                                     enum i915_cache_level cache_level, u32 flags)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
@@ -981,7 +988,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 
                pt_vaddr[act_pte] =
                        vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
-                                      cache_level, true);
+                                      cache_level, true, flags);
+
                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
                        kunmap_atomic(pt_vaddr);
                        pt_vaddr = NULL;
@@ -1218,8 +1226,12 @@ ppgtt_bind_vma(struct i915_vma *vma,
               enum i915_cache_level cache_level,
               u32 flags)
 {
+       /* Currently applicable only to VLV */
+       if (vma->obj->gt_ro)
+               flags |= PTE_READ_ONLY;
+
        vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
-                               cache_level);
+                               cache_level, flags);
 }
 
 static void ppgtt_unbind_vma(struct i915_vma *vma)
@@ -1394,7 +1406,7 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
                                     uint64_t start,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level, u32 unused)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
        unsigned first_entry = start >> PAGE_SHIFT;
@@ -1440,7 +1452,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
                                     uint64_t start,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level, u32 flags)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
        unsigned first_entry = start >> PAGE_SHIFT;
@@ -1452,7 +1464,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 
        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                addr = sg_page_iter_dma_address(&sg_iter);
-               iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
+               iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
                i++;
        }
 
@@ -1464,7 +1476,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
         */
        if (i != 0)
                WARN_ON(readl(&gtt_entries[i-1]) !=
-                       vm->pte_encode(addr, level, true));
+                       vm->pte_encode(addr, level, true, flags));
 
        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
@@ -1518,7 +1530,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
 
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
@@ -1567,6 +1579,10 @@ static void ggtt_bind_vma(struct i915_vma *vma,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = vma->obj;
 
+       /* Currently applicable only to VLV */
+       if (obj->gt_ro)
+               flags |= PTE_READ_ONLY;
+
        /* If there is no aliasing PPGTT, or the caller needs a global mapping,
         * or we have a global mapping already but the cacheability flags have
         * changed, set the global PTEs.
@@ -1583,7 +1599,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
                    (cache_level != obj->cache_level)) {
                        vma->vm->insert_entries(vma->vm, obj->pages,
                                                vma->node.start,
-                                               cache_level);
+                                               cache_level, flags);
                        obj->has_global_gtt_mapping = 1;
                }
        }
@@ -1595,7 +1611,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
                appgtt->base.insert_entries(&appgtt->base,
                                            vma->obj->pages,
                                            vma->node.start,
-                                           cache_level);
+                                           cache_level, flags);
                vma->obj->has_aliasing_ppgtt_mapping = 1;
        }
 }
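
Everything in this file amounts to threading one extra u32 of flags from the bind_vma hooks through insert_entries() into the per-platform pte_encode() callbacks, so that an object marked gt_ro can be mapped without the Baytrail write-enable bit. A condensed sketch of the resulting call chain, paraphrasing the hunks above rather than adding new behaviour (obj->gt_ro itself is set elsewhere in the series, not in this excerpt):

/* bind: translate the per-object flag into a PTE flag */
u32 flags = 0;
if (vma->obj->gt_ro)
        flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
                        cache_level, flags);

/* encode (byt_pte_encode): set the writeable bit only when allowed */
if (!(flags & PTE_READ_ONLY))
        pte |= BYT_PTE_WRITEABLE;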
index 1b96a06be3cb4f4872103c04d437e966c7b10f4f..8d6f7c18c40413bf46288bec1144d26b5fc652dc 100644 (file)
@@ -154,6 +154,7 @@ struct i915_vma {
        void (*unbind_vma)(struct i915_vma *vma);
        /* Map an object into an address space with the given cache flags. */
 #define GLOBAL_BIND (1<<0)
+#define PTE_READ_ONLY (1<<1)
        void (*bind_vma)(struct i915_vma *vma,
                         enum i915_cache_level cache_level,
                         u32 flags);
@@ -197,7 +198,7 @@ struct i915_address_space {
        /* FIXME: Need a more generic return type */
        gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
                                     enum i915_cache_level level,
-                                    bool valid); /* Create a valid PTE */
+                                    bool valid, u32 flags); /* Create a valid PTE */
        void (*clear_range)(struct i915_address_space *vm,
                            uint64_t start,
                            uint64_t length,
@@ -205,7 +206,7 @@ struct i915_address_space {
        void (*insert_entries)(struct i915_address_space *vm,
                               struct sg_table *st,
                               uint64_t start,
-                              enum i915_cache_level cache_level);
+                              enum i915_cache_level cache_level, u32 flags);
        void (*cleanup)(struct i915_address_space *vm);
 };
 
index 3521f998a1788488b8c396860f6433ffacb81ae2..e60be3f552a6b1aed14770bef853a1eab2ddf5aa 100644 (file)
 #include "i915_drv.h"
 #include "intel_renderstate.h"
 
-struct i915_render_state {
+struct render_state {
+       const struct intel_renderstate_rodata *rodata;
        struct drm_i915_gem_object *obj;
-       unsigned long ggtt_offset;
-       void *batch;
-       u32 size;
-       u32 len;
+       u64 ggtt_offset;
+       int gen;
 };
 
-static struct i915_render_state *render_state_alloc(struct drm_device *dev)
-{
-       struct i915_render_state *so;
-       struct page *page;
-       int ret;
-
-       so = kzalloc(sizeof(*so), GFP_KERNEL);
-       if (!so)
-               return ERR_PTR(-ENOMEM);
-
-       so->obj = i915_gem_alloc_object(dev, 4096);
-       if (so->obj == NULL) {
-               ret = -ENOMEM;
-               goto free;
-       }
-       so->size = 4096;
-
-       ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
-       if (ret)
-               goto free_gem;
-
-       BUG_ON(so->obj->pages->nents != 1);
-       page = sg_page(so->obj->pages->sgl);
-
-       so->batch = kmap(page);
-       if (!so->batch) {
-               ret = -ENOMEM;
-               goto unpin;
-       }
-
-       so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-
-       return so;
-unpin:
-       i915_gem_object_ggtt_unpin(so->obj);
-free_gem:
-       drm_gem_object_unreference(&so->obj->base);
-free:
-       kfree(so);
-       return ERR_PTR(ret);
-}
-
-static void render_state_free(struct i915_render_state *so)
-{
-       kunmap(so->batch);
-       i915_gem_object_ggtt_unpin(so->obj);
-       drm_gem_object_unreference(&so->obj->base);
-       kfree(so);
-}
-
 static const struct intel_renderstate_rodata *
 render_state_get_rodata(struct drm_device *dev, const int gen)
 {
@@ -101,98 +50,120 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
        return NULL;
 }
 
-static int render_state_setup(const int gen,
-                             const struct intel_renderstate_rodata *rodata,
-                             struct i915_render_state *so)
+static int render_state_init(struct render_state *so, struct drm_device *dev)
 {
-       const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
-       u32 reloc_index = 0;
-       u32 * const d = so->batch;
-       unsigned int i = 0;
        int ret;
 
-       if (!rodata || rodata->batch_items * 4 > so->size)
+       so->gen = INTEL_INFO(dev)->gen;
+       so->rodata = render_state_get_rodata(dev, so->gen);
+       if (so->rodata == NULL)
+               return 0;
+
+       if (so->rodata->batch_items * 4 > 4096)
                return -EINVAL;
 
+       so->obj = i915_gem_alloc_object(dev, 4096);
+       if (so->obj == NULL)
+               return -ENOMEM;
+
+       ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+       if (ret)
+               goto free_gem;
+
+       so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+       return 0;
+
+free_gem:
+       drm_gem_object_unreference(&so->obj->base);
+       return ret;
+}
+
+static int render_state_setup(struct render_state *so)
+{
+       const struct intel_renderstate_rodata *rodata = so->rodata;
+       unsigned int i = 0, reloc_index = 0;
+       struct page *page;
+       u32 *d;
+       int ret;
+
        ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
        if (ret)
                return ret;
 
+       page = sg_page(so->obj->pages->sgl);
+       d = kmap(page);
+
        while (i < rodata->batch_items) {
                u32 s = rodata->batch[i];
 
-               if (reloc_index < rodata->reloc_items &&
-                   i * 4  == rodata->reloc[reloc_index]) {
-
-                       s += goffset & 0xffffffff;
-
-                       /* We keep batch offsets max 32bit */
-                       if (gen >= 8) {
+               if (i * 4  == rodata->reloc[reloc_index]) {
+                       u64 r = s + so->ggtt_offset;
+                       s = lower_32_bits(r);
+                       if (so->gen >= 8) {
                                if (i + 1 >= rodata->batch_items ||
                                    rodata->batch[i + 1] != 0)
                                        return -EINVAL;
 
-                               d[i] = s;
-                               i++;
-                               s = (goffset & 0xffffffff00000000ull) >> 32;
+                               d[i++] = s;
+                               s = upper_32_bits(r);
                        }
 
                        reloc_index++;
                }
 
-               d[i] = s;
-               i++;
+               d[i++] = s;
        }
+       kunmap(page);
 
        ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
        if (ret)
                return ret;
 
-       if (rodata->reloc_items != reloc_index) {
-               DRM_ERROR("not all relocs resolved, %d out of %d\n",
-                         reloc_index, rodata->reloc_items);
+       if (rodata->reloc[reloc_index] != -1) {
+               DRM_ERROR("only %d relocs resolved\n", reloc_index);
                return -EINVAL;
        }
 
-       so->len = rodata->batch_items * 4;
-
        return 0;
 }
 
+static void render_state_fini(struct render_state *so)
+{
+       i915_gem_object_ggtt_unpin(so->obj);
+       drm_gem_object_unreference(&so->obj->base);
+}
+
 int i915_gem_render_state_init(struct intel_engine_cs *ring)
 {
-       const int gen = INTEL_INFO(ring->dev)->gen;
-       struct i915_render_state *so;
-       const struct intel_renderstate_rodata *rodata;
+       struct render_state so;
        int ret;
 
        if (WARN_ON(ring->id != RCS))
                return -ENOENT;
 
-       rodata = render_state_get_rodata(ring->dev, gen);
-       if (rodata == NULL)
-               return 0;
+       ret = render_state_init(&so, ring->dev);
+       if (ret)
+               return ret;
 
-       so = render_state_alloc(ring->dev);
-       if (IS_ERR(so))
-               return PTR_ERR(so);
+       if (so.rodata == NULL)
+               return 0;
 
-       ret = render_state_setup(gen, rodata, so);
+       ret = render_state_setup(&so);
        if (ret)
                goto out;
 
        ret = ring->dispatch_execbuffer(ring,
-                                       i915_gem_obj_ggtt_offset(so->obj),
-                                       so->len,
+                                       so.ggtt_offset,
+                                       so.rodata->batch_items * 4,
                                        I915_DISPATCH_SECURE);
        if (ret)
                goto out;
 
-       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
+       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-       ret = __i915_add_request(ring, NULL, so->obj, NULL);
+       ret = __i915_add_request(ring, NULL, so.obj, NULL);
        /* __i915_add_request moves object to inactive if it fails */
 out:
-       render_state_free(so);
+       render_state_fini(&so);
        return ret;
 }
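
One detail worth calling out in render_state_setup() above: on gen8+ a relocation is a full 64-bit GGTT address spread over two consecutive dwords, which is why the rewrite requires the dword following a relocation slot to be zero in the rodata. The arithmetic, written out as a small sketch using r, s, d and i as in the function above:

/* r is the absolute GGTT address of the relocation target */
u64 r = s + so->ggtt_offset;
s = lower_32_bits(r);
d[i++] = s;                  /* low dword replaces the placeholder    */
s = upper_32_bits(r);
d[i++] = s;                  /* high dword fills the next slot, which */
                             /* the rodata guarantees was 0           */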
index 62ef55ba061cfe42ed5dfddd025fc8eecad0fab5..644117855e019796853f173cf0a93f5d39bb937e 100644 (file)
@@ -292,9 +292,20 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
        kfree(obj->pages);
 }
 
+
+static void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+       if (obj->stolen) {
+               drm_mm_remove_node(obj->stolen);
+               kfree(obj->stolen);
+               obj->stolen = NULL;
+       }
+}
 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
+       .release = i915_gem_object_release_stolen,
 };
 
 static struct drm_i915_gem_object *
@@ -452,13 +463,3 @@ err_out:
        drm_gem_object_unreference(&obj->base);
        return NULL;
 }
-
-void
-i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
-{
-       if (obj->stolen) {
-               drm_mm_remove_node(obj->stolen);
-               kfree(obj->stolen);
-               obj->stolen = NULL;
-       }
-}
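
The direct i915_gem_object_release_stolen() call that the earlier i915_gem.c hunk removed from i915_gem_free_object() is replaced here by the new .release entry in i915_gem_object_stolen_ops, so only stolen objects pay for the drm_mm node removal. A hedged sketch of the dispatch this enables (the exact call site in the generic free path is not shown in this excerpt):

/* generic free path, sketch only: per-type cleanup via the ops table */
if (obj->ops->release)
        obj->ops->release(obj);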
index 69a7960c36bb65e0558ec08a80a64c903894997c..c0d7674c45cd3f84c1aff1f4278de011a56ac8dd 100644 (file)
@@ -1214,6 +1214,9 @@ static void notify_ring(struct drm_device *dev,
 
        trace_i915_gem_request_complete(ring);
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               intel_notify_mmio_flip(ring);
+
        wake_up_all(&ring->irq_queue);
        i915_queue_hangcheck(dev);
 }
@@ -1248,8 +1251,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
-               else
-                       adj = 1;
+               else {
+                       /* CHV needs even encode values */
+                       adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
+               }
                new_delay = dev_priv->rps.cur_freq + adj;
 
                /*
@@ -1267,8 +1272,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
-               else
-                       adj = -1;
+               else {
+                       /* CHV needs even encode values */
+                       adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
+               }
                new_delay = dev_priv->rps.cur_freq + adj;
        } else { /* unknown event */
                new_delay = dev_priv->rps.cur_freq;
@@ -1454,6 +1461,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
                tmp = I915_READ(GEN8_GT_IIR(0));
                if (tmp) {
+                       I915_WRITE(GEN8_GT_IIR(0), tmp);
                        ret = IRQ_HANDLED;
                        rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
                        bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
@@ -1461,7 +1469,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
                                notify_ring(dev, &dev_priv->ring[RCS]);
                        if (bcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, &dev_priv->ring[BCS]);
-                       I915_WRITE(GEN8_GT_IIR(0), tmp);
                } else
                        DRM_ERROR("The master control interrupt lied (GT0)!\n");
        }
@@ -1469,6 +1476,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
                tmp = I915_READ(GEN8_GT_IIR(1));
                if (tmp) {
+                       I915_WRITE(GEN8_GT_IIR(1), tmp);
                        ret = IRQ_HANDLED;
                        vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
                        if (vcs & GT_RENDER_USER_INTERRUPT)
@@ -1476,7 +1484,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
                        vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, &dev_priv->ring[VCS2]);
-                       I915_WRITE(GEN8_GT_IIR(1), tmp);
                } else
                        DRM_ERROR("The master control interrupt lied (GT1)!\n");
        }
@@ -1484,10 +1491,10 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
        if (master_ctl & GEN8_GT_PM_IRQ) {
                tmp = I915_READ(GEN8_GT_IIR(2));
                if (tmp & dev_priv->pm_rps_events) {
-                       ret = IRQ_HANDLED;
-                       gen8_rps_irq_handler(dev_priv, tmp);
                        I915_WRITE(GEN8_GT_IIR(2),
                                   tmp & dev_priv->pm_rps_events);
+                       ret = IRQ_HANDLED;
+                       gen8_rps_irq_handler(dev_priv, tmp);
                } else
                        DRM_ERROR("The master control interrupt lied (PM)!\n");
        }
@@ -1495,11 +1502,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
        if (master_ctl & GEN8_GT_VECS_IRQ) {
                tmp = I915_READ(GEN8_GT_IIR(3));
                if (tmp) {
+                       I915_WRITE(GEN8_GT_IIR(3), tmp);
                        ret = IRQ_HANDLED;
                        vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, &dev_priv->ring[VECS]);
-                       I915_WRITE(GEN8_GT_IIR(3), tmp);
                } else
                        DRM_ERROR("The master control interrupt lied (GT3)!\n");
        }
@@ -1805,26 +1812,28 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
 
-       if (IS_G4X(dev)) {
-               u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+       if (hotplug_status) {
+               I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+               /*
+                * Make sure hotplug status is cleared before we clear IIR, or else we
+                * may miss hotplug events.
+                */
+               POSTING_READ(PORT_HOTPLUG_STAT);
 
-               intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
-       } else {
-               u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+               if (IS_G4X(dev)) {
+                       u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
-               intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
-       }
+                       intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
+               } else {
+                       u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
-       if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
-           hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
-               dp_aux_irq_handler(dev);
+                       intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
+               }
 
-       I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
-       /*
-        * Make sure hotplug status is cleared before we clear IIR, or else we
-        * may miss hotplug events.
-        */
-       POSTING_READ(PORT_HOTPLUG_STAT);
+               if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
+                   hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+                       dp_aux_irq_handler(dev);
+       }
 }
 
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -1835,29 +1844,36 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
        irqreturn_t ret = IRQ_NONE;
 
        while (true) {
-               iir = I915_READ(VLV_IIR);
+               /* Find, clear, then process each source of interrupt */
+
                gt_iir = I915_READ(GTIIR);
+               if (gt_iir)
+                       I915_WRITE(GTIIR, gt_iir);
+
                pm_iir = I915_READ(GEN6_PMIIR);
+               if (pm_iir)
+                       I915_WRITE(GEN6_PMIIR, pm_iir);
+
+               iir = I915_READ(VLV_IIR);
+               if (iir) {
+                       /* Consume port before clearing IIR or we'll miss events */
+                       if (iir & I915_DISPLAY_PORT_INTERRUPT)
+                               i9xx_hpd_irq_handler(dev);
+                       I915_WRITE(VLV_IIR, iir);
+               }
 
                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;
 
                ret = IRQ_HANDLED;
 
-               snb_gt_irq_handler(dev, dev_priv, gt_iir);
-
-               valleyview_pipestat_irq_handler(dev, iir);
-
-               /* Consume port.  Then clear IIR or we'll miss events */
-               if (iir & I915_DISPLAY_PORT_INTERRUPT)
-                       i9xx_hpd_irq_handler(dev);
-
+               if (gt_iir)
+                       snb_gt_irq_handler(dev, dev_priv, gt_iir);
                if (pm_iir)
                        gen6_rps_irq_handler(dev_priv, pm_iir);
-
-               I915_WRITE(GTIIR, gt_iir);
-               I915_WRITE(GEN6_PMIIR, pm_iir);
-               I915_WRITE(VLV_IIR, iir);
+               /* Call regardless, as some status bits might not be
+                * signalled in iir */
+               valleyview_pipestat_irq_handler(dev, iir);
        }
 
 out:
@@ -1878,21 +1894,27 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
                if (master_ctl == 0 && iir == 0)
                        break;
 
+               ret = IRQ_HANDLED;
+
                I915_WRITE(GEN8_MASTER_IRQ, 0);
 
-               gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+               /* Find, clear, then process each source of interrupt */
 
-               valleyview_pipestat_irq_handler(dev, iir);
+               if (iir) {
+                       /* Consume port before clearing IIR or we'll miss events */
+                       if (iir & I915_DISPLAY_PORT_INTERRUPT)
+                               i9xx_hpd_irq_handler(dev);
+                       I915_WRITE(VLV_IIR, iir);
+               }
 
-               /* Consume port.  Then clear IIR or we'll miss events */
-               i9xx_hpd_irq_handler(dev);
+               gen8_gt_irq_handler(dev, dev_priv, master_ctl);
 
-               I915_WRITE(VLV_IIR, iir);
+               /* Call regardless, as some status bits might not be
+                * signalled in iir */
+               valleyview_pipestat_irq_handler(dev, iir);
 
                I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
                POSTING_READ(GEN8_MASTER_IRQ);
-
-               ret = IRQ_HANDLED;
        }
 
        return ret;
@@ -2128,6 +2150,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
        }
 }
 
+/*
+ * To handle irqs with the minimum potential races with fresh interrupts, we:
+ * 1 - Disable Master Interrupt Control.
+ * 2 - Find the source(s) of the interrupt.
+ * 3 - Clear the Interrupt Identity bits (IIR).
+ * 4 - Process the interrupt(s) that had bits set in the IIRs.
+ * 5 - Re-enable Master Interrupt Control.
+ */
 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
@@ -2155,32 +2185,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
                POSTING_READ(SDEIER);
        }
 
+       /* Find, clear, then process each source of interrupt */
+
        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
+               I915_WRITE(GTIIR, gt_iir);
+               ret = IRQ_HANDLED;
                if (INTEL_INFO(dev)->gen >= 6)
                        snb_gt_irq_handler(dev, dev_priv, gt_iir);
                else
                        ilk_gt_irq_handler(dev, dev_priv, gt_iir);
-               I915_WRITE(GTIIR, gt_iir);
-               ret = IRQ_HANDLED;
        }
 
        de_iir = I915_READ(DEIIR);
        if (de_iir) {
+               I915_WRITE(DEIIR, de_iir);
+               ret = IRQ_HANDLED;
                if (INTEL_INFO(dev)->gen >= 7)
                        ivb_display_irq_handler(dev, de_iir);
                else
                        ilk_display_irq_handler(dev, de_iir);
-               I915_WRITE(DEIIR, de_iir);
-               ret = IRQ_HANDLED;
        }
 
        if (INTEL_INFO(dev)->gen >= 6) {
                u32 pm_iir = I915_READ(GEN6_PMIIR);
                if (pm_iir) {
-                       gen6_rps_irq_handler(dev_priv, pm_iir);
                        I915_WRITE(GEN6_PMIIR, pm_iir);
                        ret = IRQ_HANDLED;
+                       gen6_rps_irq_handler(dev_priv, pm_iir);
                }
        }
 
@@ -2211,36 +2243,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);
 
+       /* Find, clear, then process each source of interrupt */
+
        ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
 
        if (master_ctl & GEN8_DE_MISC_IRQ) {
                tmp = I915_READ(GEN8_DE_MISC_IIR);
-               if (tmp & GEN8_DE_MISC_GSE)
-                       intel_opregion_asle_intr(dev);
-               else if (tmp)
-                       DRM_ERROR("Unexpected DE Misc interrupt\n");
-               else
-                       DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
-
                if (tmp) {
                        I915_WRITE(GEN8_DE_MISC_IIR, tmp);
                        ret = IRQ_HANDLED;
+                       if (tmp & GEN8_DE_MISC_GSE)
+                               intel_opregion_asle_intr(dev);
+                       else
+                               DRM_ERROR("Unexpected DE Misc interrupt\n");
                }
+               else
+                       DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
        }
 
        if (master_ctl & GEN8_DE_PORT_IRQ) {
                tmp = I915_READ(GEN8_DE_PORT_IIR);
-               if (tmp & GEN8_AUX_CHANNEL_A)
-                       dp_aux_irq_handler(dev);
-               else if (tmp)
-                       DRM_ERROR("Unexpected DE Port interrupt\n");
-               else
-                       DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
-
                if (tmp) {
                        I915_WRITE(GEN8_DE_PORT_IIR, tmp);
                        ret = IRQ_HANDLED;
+                       if (tmp & GEN8_AUX_CHANNEL_A)
+                               dp_aux_irq_handler(dev);
+                       else
+                               DRM_ERROR("Unexpected DE Port interrupt\n");
                }
+               else
+                       DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
        }
 
        for_each_pipe(pipe) {
@@ -2250,33 +2282,32 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                        continue;
 
                pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
-               if (pipe_iir & GEN8_PIPE_VBLANK)
-                       intel_pipe_handle_vblank(dev, pipe);
-
-               if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
-                       intel_prepare_page_flip(dev, pipe);
-                       intel_finish_page_flip_plane(dev, pipe);
-               }
+               if (pipe_iir) {
+                       ret = IRQ_HANDLED;
+                       I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+                       if (pipe_iir & GEN8_PIPE_VBLANK)
+                               intel_pipe_handle_vblank(dev, pipe);
 
-               if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
-                       hsw_pipe_crc_irq_handler(dev, pipe);
+                       if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+                               intel_prepare_page_flip(dev, pipe);
+                               intel_finish_page_flip_plane(dev, pipe);
+                       }
 
-               if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
-                       if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
-                                                                 false))
-                               DRM_ERROR("Pipe %c FIFO underrun\n",
-                                         pipe_name(pipe));
-               }
+                       if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+                               hsw_pipe_crc_irq_handler(dev, pipe);
 
-               if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
-                       DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
-                                 pipe_name(pipe),
-                                 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
-               }
+                       if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+                               if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+                                                                         false))
+                                       DRM_ERROR("Pipe %c FIFO underrun\n",
+                                                 pipe_name(pipe));
+                       }
 
-               if (pipe_iir) {
-                       ret = IRQ_HANDLED;
-                       I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+                       if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+                               DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+                                         pipe_name(pipe),
+                                         pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
+                       }
                } else
                        DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
        }
@@ -2288,13 +2319,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                 * on older pch-split platforms. But this needs testing.
                 */
                u32 pch_iir = I915_READ(SDEIIR);
-
-               cpt_irq_handler(dev, pch_iir);
-
                if (pch_iir) {
                        I915_WRITE(SDEIIR, pch_iir);
                        ret = IRQ_HANDLED;
-               }
+                       cpt_irq_handler(dev, pch_iir);
+               } else
+                       DRM_ERROR("The master control interrupt lied (SDE)!\n");
+
        }
 
        I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
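
Every handler touched above is converted to the same five-step recipe spelled out in the new comment before ironlake_irq_handler(): disable the master interrupt, find the asserted IIR bits, write them back to clear them, only then run the per-source handlers, and finally re-enable the master interrupt. A condensed generic sketch of that ordering (example_irq_handler is hypothetical; the register and helper names are taken from the gen8 hunks above):

static irqreturn_t example_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        irqreturn_t ret = IRQ_NONE;
        u32 iir;

        I915_WRITE(GEN8_MASTER_IRQ, 0);                 /* 1: disable master  */
        POSTING_READ(GEN8_MASTER_IRQ);

        iir = I915_READ(GEN8_GT_IIR(0));                /* 2: find the source */
        if (iir) {
                I915_WRITE(GEN8_GT_IIR(0), iir);        /* 3: clear IIR first */
                ret = IRQ_HANDLED;
                notify_ring(dev, &dev_priv->ring[RCS]); /* 4: then process    */
        }

        I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); /* 5: re-enable */
        POSTING_READ(GEN8_MASTER_IRQ);
        return ret;
}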
index d05a2afa17dc605acc294e0e15ae88ac200b5093..81457293cd3efb06201aa7cccfea05526d8d51b4 100644 (file)
@@ -48,6 +48,7 @@ struct i915_params i915 __read_mostly = {
        .disable_display = 0,
        .enable_cmd_parser = 1,
        .disable_vtd_wa = 0,
+       .use_mmio_flip = 0,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
@@ -156,3 +157,7 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"
 module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
 MODULE_PARM_DESC(enable_cmd_parser,
                 "Enable command parsing (1=enabled [default], 0=disabled)");
+
+module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
+MODULE_PARM_DESC(use_mmio_flip,
+                "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
index e691b30b28179ab9d026daeaa4c6a5e176062327..348856787b7c21146ea23e4b4df2b3b52a57fe71 100644 (file)
@@ -29,8 +29,8 @@
 #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
-#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
-#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
+#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
+                              (pipe) == PIPE_B ? (b) : (c))
 
 #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
 #define _MASKED_BIT_DISABLE(a) ((a) << 16)
@@ -529,6 +529,16 @@ enum punit_power_well {
 #define PUNIT_FUSE_BUS2                                0xf6 /* bits 47:40 */
 #define PUNIT_FUSE_BUS1                                0xf5 /* bits 55:48 */
 
+#define PUNIT_GPU_STATUS_REG                   0xdb
+#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT        16
+#define PUNIT_GPU_STATUS_MAX_FREQ_MASK         0xff
+#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT    8
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK     0xff
+
+#define PUNIT_GPU_DUTYCYCLE_REG                0xdf
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT     8
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK      0xff
+
 #define IOSF_NC_FB_GFX_FREQ_FUSE               0x1c
 #define   FB_GFX_MAX_FREQ_FUSE_SHIFT           3
 #define   FB_GFX_MAX_FREQ_FUSE_MASK            0x000007f8
@@ -761,6 +771,8 @@ enum punit_power_well {
 
 #define _VLV_PCS_DW8_CH0               0x8220
 #define _VLV_PCS_DW8_CH1               0x8420
+#define   CHV_PCS_USEDCLKCHANNEL_OVRRIDE       (1 << 20)
+#define   CHV_PCS_USEDCLKCHANNEL               (1 << 21)
 #define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
 
 #define _VLV_PCS01_DW8_CH0             0x0220
@@ -869,6 +881,16 @@ enum punit_power_well {
 #define   DPIO_CHV_PROP_COEFF_SHIFT    0
 #define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
 
+#define _CHV_CMN_DW5_CH0               0x8114
+#define   CHV_BUFRIGHTENA1_DISABLE     (0 << 20)
+#define   CHV_BUFRIGHTENA1_NORMAL      (1 << 20)
+#define   CHV_BUFRIGHTENA1_FORCE       (3 << 20)
+#define   CHV_BUFRIGHTENA1_MASK                (3 << 20)
+#define   CHV_BUFLEFTENA1_DISABLE      (0 << 22)
+#define   CHV_BUFLEFTENA1_NORMAL       (1 << 22)
+#define   CHV_BUFLEFTENA1_FORCE                (3 << 22)
+#define   CHV_BUFLEFTENA1_MASK         (3 << 22)
+
 #define _CHV_CMN_DW13_CH0              0x8134
 #define _CHV_CMN_DW0_CH1               0x8080
 #define   DPIO_CHV_S1_DIV_SHIFT                21
@@ -883,8 +905,21 @@ enum punit_power_well {
 #define _CHV_CMN_DW1_CH1               0x8084
 #define   DPIO_AFC_RECAL               (1 << 14)
 #define   DPIO_DCLKP_EN                        (1 << 13)
+#define   CHV_BUFLEFTENA2_DISABLE      (0 << 17) /* CL2 DW1 only */
+#define   CHV_BUFLEFTENA2_NORMAL       (1 << 17) /* CL2 DW1 only */
+#define   CHV_BUFLEFTENA2_FORCE                (3 << 17) /* CL2 DW1 only */
+#define   CHV_BUFLEFTENA2_MASK         (3 << 17) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_DISABLE     (0 << 19) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_NORMAL      (1 << 19) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_FORCE       (3 << 19) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_MASK                (3 << 19) /* CL2 DW1 only */
 #define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
 
+#define _CHV_CMN_DW19_CH0              0x814c
+#define _CHV_CMN_DW6_CH1               0x8098
+#define   CHV_CMN_USEDCLKCHANNEL       (1 << 13)
+#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
+
 #define CHV_CMN_DW30                   0x8178
 #define   DPIO_LRC_BYPASS              (1 << 3)
 
@@ -933,6 +968,7 @@ enum punit_power_well {
 #define   SANDYBRIDGE_FENCE_PITCH_SHIFT        32
 #define   GEN7_FENCE_MAX_PITCH_VAL     0x0800
 
+
 /* control register for cpu gtt access */
 #define TILECTL                                0x101000
 #define   TILECTL_SWZCTL                       (1 << 0)
@@ -1167,6 +1203,8 @@ enum punit_power_well {
 #define VLV_IMR                (VLV_DISPLAY_BASE + 0x20a8)
 #define VLV_ISR                (VLV_DISPLAY_BASE + 0x20ac)
 #define VLV_PCBR       (VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT    12
+
 #define   DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
 #define EIR            0x020b0
 #define EMR            0x020b4
@@ -1567,11 +1605,10 @@ enum punit_power_well {
 /*
  * Clock control & power management
  */
-#define DPLL_A_OFFSET 0x6014
-#define DPLL_B_OFFSET 0x6018
-#define CHV_DPLL_C_OFFSET 0x6030
-#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
-                   dev_priv->info.display_mmio_offset)
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
 
 #define VGA0   0x6000
 #define VGA1   0x6004
@@ -1659,11 +1696,10 @@ enum punit_power_well {
 #define   SDVO_MULTIPLIER_SHIFT_HIRES          4
 #define   SDVO_MULTIPLIER_SHIFT_VGA            0
 
-#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
-#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
-#define CHV_DPLL_C_MD_OFFSET 0x603c
-#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
-                      dev_priv->info.display_mmio_offset)
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
 
 /*
  * UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -2373,6 +2409,7 @@ enum punit_power_well {
 #define EDP_PSR_BASE(dev)                       (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
 #define EDP_PSR_CTL(dev)                       (EDP_PSR_BASE(dev) + 0)
 #define   EDP_PSR_ENABLE                       (1<<31)
+#define   BDW_PSR_SINGLE_FRAME                 (1<<30)
 #define   EDP_PSR_LINK_DISABLE                 (0<<27)
 #define   EDP_PSR_LINK_STANDBY                 (1<<27)
 #define   EDP_PSR_MIN_LINK_ENTRY_TIME_MASK     (3<<25)
@@ -2530,8 +2567,14 @@ enum punit_power_well {
 #define   PORTC_HOTPLUG_LIVE_STATUS_VLV                (1 << 28)
 #define   PORTB_HOTPLUG_LIVE_STATUS_VLV                (1 << 29)
 #define   PORTD_HOTPLUG_INT_STATUS             (3 << 21)
+#define   PORTD_HOTPLUG_INT_LONG_PULSE         (2 << 21)
+#define   PORTD_HOTPLUG_INT_SHORT_PULSE                (1 << 21)
 #define   PORTC_HOTPLUG_INT_STATUS             (3 << 19)
+#define   PORTC_HOTPLUG_INT_LONG_PULSE         (2 << 19)
+#define   PORTC_HOTPLUG_INT_SHORT_PULSE                (1 << 19)
 #define   PORTB_HOTPLUG_INT_STATUS             (3 << 17)
+#define   PORTB_HOTPLUG_INT_LONG_PULSE         (2 << 17)
+#define   PORTB_HOTPLUG_INT_SHORT_PLUSE                (1 << 17)
 /* CRT/TV common between gen3+ */
 #define   CRT_HOTPLUG_INT_STATUS               (1 << 11)
 #define   TV_HOTPLUG_INT_STATUS                        (1 << 10)
@@ -2585,7 +2628,7 @@ enum punit_power_well {
 
 #define PORT_DFT_I9XX                          0x61150
 #define   DC_BALANCE_RESET                     (1 << 25)
-#define PORT_DFT2_G4X                          0x61154
+#define PORT_DFT2_G4X          (dev_priv->info.display_mmio_offset + 0x61154)
 #define   DC_BALANCE_RESET_VLV                 (1 << 31)
 #define   PIPE_SCRAMBLE_RESET_MASK             (0x3 << 0)
 #define   PIPE_B_SCRAMBLE_RESET                        (1 << 1)
@@ -4627,6 +4670,8 @@ enum punit_power_well {
 #define GEN7_L3CNTLREG1                                0xB01C
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C47FF8C
 #define  GEN7_L3AGDIS                          (1<<19)
+#define GEN7_L3CNTLREG2                                0xB020
+#define GEN7_L3CNTLREG3                                0xB024
 
 #define GEN7_L3_CHICKEN_MODE_REGISTER          0xB030
 #define  GEN7_WA_L3_CHICKEN_MODE                               0x20000000
@@ -4873,8 +4918,7 @@ enum punit_power_well {
 #define _PCH_TRANSA_LINK_M2    0xe0048
 #define _PCH_TRANSA_LINK_N2    0xe004c
 
-/* Per-transcoder DIP controls */
-
+/* Per-transcoder DIP controls (PCH) */
 #define _VIDEO_DIP_CTL_A         0xe0200
 #define _VIDEO_DIP_DATA_A        0xe0208
 #define _VIDEO_DIP_GCP_A         0xe0210
@@ -4887,6 +4931,7 @@ enum punit_power_well {
 #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
 #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
+/* Per-transcoder DIP controls (VLV) */
 #define VLV_VIDEO_DIP_CTL_A            (VLV_DISPLAY_BASE + 0x60200)
 #define VLV_VIDEO_DIP_DATA_A           (VLV_DISPLAY_BASE + 0x60208)
 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A   (VLV_DISPLAY_BASE + 0x60210)
@@ -4895,12 +4940,19 @@ enum punit_power_well {
 #define VLV_VIDEO_DIP_DATA_B           (VLV_DISPLAY_BASE + 0x61174)
 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B   (VLV_DISPLAY_BASE + 0x61178)
 
+#define CHV_VIDEO_DIP_CTL_C            (VLV_DISPLAY_BASE + 0x611f0)
+#define CHV_VIDEO_DIP_DATA_C           (VLV_DISPLAY_BASE + 0x611f4)
+#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C   (VLV_DISPLAY_BASE + 0x611f8)
+
 #define VLV_TVIDEO_DIP_CTL(pipe) \
-        _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+       _PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
+              VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
 #define VLV_TVIDEO_DIP_DATA(pipe) \
-        _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+       _PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
+              VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
 #define VLV_TVIDEO_DIP_GCP(pipe) \
-       _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+       _PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+               VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
 
 /* Haswell DIP controls */
 #define HSW_VIDEO_DIP_CTL_A            0x60200
@@ -5771,7 +5823,6 @@ enum punit_power_well {
 #define DDI_BUF_CTL_B                          0x64100
 #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
 #define  DDI_BUF_CTL_ENABLE                    (1<<31)
-/* Haswell */
 #define  DDI_BUF_EMP_400MV_0DB_HSW             (0<<24)   /* Sel0 */
 #define  DDI_BUF_EMP_400MV_3_5DB_HSW           (1<<24)   /* Sel1 */
 #define  DDI_BUF_EMP_400MV_6DB_HSW             (2<<24)   /* Sel2 */
@@ -5781,16 +5832,6 @@ enum punit_power_well {
 #define  DDI_BUF_EMP_600MV_6DB_HSW             (6<<24)   /* Sel6 */
 #define  DDI_BUF_EMP_800MV_0DB_HSW             (7<<24)   /* Sel7 */
 #define  DDI_BUF_EMP_800MV_3_5DB_HSW           (8<<24)   /* Sel8 */
-/* Broadwell */
-#define  DDI_BUF_EMP_400MV_0DB_BDW             (0<<24)   /* Sel0 */
-#define  DDI_BUF_EMP_400MV_3_5DB_BDW           (1<<24)   /* Sel1 */
-#define  DDI_BUF_EMP_400MV_6DB_BDW             (2<<24)   /* Sel2 */
-#define  DDI_BUF_EMP_600MV_0DB_BDW             (3<<24)   /* Sel3 */
-#define  DDI_BUF_EMP_600MV_3_5DB_BDW           (4<<24)   /* Sel4 */
-#define  DDI_BUF_EMP_600MV_6DB_BDW             (5<<24)   /* Sel5 */
-#define  DDI_BUF_EMP_800MV_0DB_BDW             (6<<24)   /* Sel6 */
-#define  DDI_BUF_EMP_800MV_3_5DB_BDW           (7<<24)   /* Sel7 */
-#define  DDI_BUF_EMP_1200MV_0DB_BDW            (8<<24)   /* Sel8 */
 #define  DDI_BUF_EMP_MASK                      (0xf<<24)
 #define  DDI_BUF_PORT_REVERSAL                 (1<<16)
 #define  DDI_BUF_IS_IDLE                       (1<<7)
@@ -6002,7 +6043,8 @@ enum punit_power_well {
 
 #define _MIPIA_PORT_CTRL                       (VLV_DISPLAY_BASE + 0x61190)
 #define _MIPIB_PORT_CTRL                       (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(pipe)           _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define MIPI_PORT_CTRL(tc)             _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
+                                               _MIPIB_PORT_CTRL)
 #define  DPI_ENABLE                                    (1 << 31) /* A + B */
 #define  MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT             27
 #define  MIPIA_MIPI4DPHY_DELAY_COUNT_MASK              (0xf << 27)
@@ -6044,18 +6086,20 @@ enum punit_power_well {
 
 #define _MIPIA_TEARING_CTRL                    (VLV_DISPLAY_BASE + 0x61194)
 #define _MIPIB_TEARING_CTRL                    (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(pipe)                _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define MIPI_TEARING_CTRL(tc)                  _TRANSCODER(tc, \
+                               _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
 #define  TEARING_EFFECT_DELAY_SHIFT                    0
 #define  TEARING_EFFECT_DELAY_MASK                     (0xffff << 0)
 
 /* XXX: all bits reserved */
-#define _MIPIA_AUTOPWG                         (VLV_DISPLAY_BASE + 0x611a0)
+#define _MIPIA_AUTOPWG                 (VLV_DISPLAY_BASE + 0x611a0)
 
 /* MIPI DSI Controller and D-PHY registers */
 
-#define _MIPIA_DEVICE_READY                    (VLV_DISPLAY_BASE + 0xb000)
-#define _MIPIB_DEVICE_READY                    (VLV_DISPLAY_BASE + 0xb800)
-#define MIPI_DEVICE_READY(pipe)                _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define _MIPIA_DEVICE_READY            (dev_priv->mipi_mmio_base + 0xb000)
+#define _MIPIB_DEVICE_READY            (dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(tc)          _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
+                                               _MIPIB_DEVICE_READY)
 #define  BUS_POSSESSION                                        (1 << 3) /* set to give bus to receiver */
 #define  ULPS_STATE_MASK                               (3 << 1)
 #define  ULPS_STATE_ENTER                              (2 << 1)
@@ -6063,12 +6107,14 @@ enum punit_power_well {
 #define  ULPS_STATE_NORMAL_OPERATION                   (0 << 1)
 #define  DEVICE_READY                                  (1 << 0)
 
-#define _MIPIA_INTR_STAT                       (VLV_DISPLAY_BASE + 0xb004)
-#define _MIPIB_INTR_STAT                       (VLV_DISPLAY_BASE + 0xb804)
-#define MIPI_INTR_STAT(pipe)           _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
-#define _MIPIA_INTR_EN                         (VLV_DISPLAY_BASE + 0xb008)
-#define _MIPIB_INTR_EN                         (VLV_DISPLAY_BASE + 0xb808)
-#define MIPI_INTR_EN(pipe)             _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define _MIPIA_INTR_STAT               (dev_priv->mipi_mmio_base + 0xb004)
+#define _MIPIB_INTR_STAT               (dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(tc)             _TRANSCODER(tc, _MIPIA_INTR_STAT, \
+                                       _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN                 (dev_priv->mipi_mmio_base + 0xb008)
+#define _MIPIB_INTR_EN                 (dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(tc)               _TRANSCODER(tc, _MIPIA_INTR_EN, \
+                                       _MIPIB_INTR_EN)
 #define  TEARING_EFFECT                                        (1 << 31)
 #define  SPL_PKT_SENT_INTERRUPT                                (1 << 30)
 #define  GEN_READ_DATA_AVAIL                           (1 << 29)
@@ -6102,9 +6148,10 @@ enum punit_power_well {
 #define  RXSOT_SYNC_ERROR                              (1 << 1)
 #define  RXSOT_ERROR                                   (1 << 0)
 
-#define _MIPIA_DSI_FUNC_PRG                    (VLV_DISPLAY_BASE + 0xb00c)
-#define _MIPIB_DSI_FUNC_PRG                    (VLV_DISPLAY_BASE + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(pipe)                _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define _MIPIA_DSI_FUNC_PRG            (dev_priv->mipi_mmio_base + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG            (dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(tc)          _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
+                                               _MIPIB_DSI_FUNC_PRG)
 #define  CMD_MODE_DATA_WIDTH_MASK                      (7 << 13)
 #define  CMD_MODE_NOT_SUPPORTED                                (0 << 13)
 #define  CMD_MODE_DATA_WIDTH_16_BIT                    (1 << 13)
@@ -6125,78 +6172,94 @@ enum punit_power_well {
 #define  DATA_LANES_PRG_REG_SHIFT                      0
 #define  DATA_LANES_PRG_REG_MASK                       (7 << 0)
 
-#define _MIPIA_HS_TX_TIMEOUT                   (VLV_DISPLAY_BASE + 0xb010)
-#define _MIPIB_HS_TX_TIMEOUT                   (VLV_DISPLAY_BASE + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(pipe)       _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define _MIPIA_HS_TX_TIMEOUT           (dev_priv->mipi_mmio_base + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT           (dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
+                                       _MIPIB_HS_TX_TIMEOUT)
 #define  HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK            0xffffff
 
-#define _MIPIA_LP_RX_TIMEOUT                   (VLV_DISPLAY_BASE + 0xb014)
-#define _MIPIB_LP_RX_TIMEOUT                   (VLV_DISPLAY_BASE + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(pipe)       _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define _MIPIA_LP_RX_TIMEOUT           (dev_priv->mipi_mmio_base + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT           (dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
+                                       _MIPIB_LP_RX_TIMEOUT)
 #define  LOW_POWER_RX_TIMEOUT_COUNTER_MASK             0xffffff
 
-#define _MIPIA_TURN_AROUND_TIMEOUT             (VLV_DISPLAY_BASE + 0xb018)
-#define _MIPIB_TURN_AROUND_TIMEOUT             (VLV_DISPLAY_BASE + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define _MIPIA_TURN_AROUND_TIMEOUT     (dev_priv->mipi_mmio_base + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT     (dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(tc)   _TRANSCODER(tc, \
+                       _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
 #define  TURN_AROUND_TIMEOUT_MASK                      0x3f
 
-#define _MIPIA_DEVICE_RESET_TIMER              (VLV_DISPLAY_BASE + 0xb01c)
-#define _MIPIB_DEVICE_RESET_TIMER              (VLV_DISPLAY_BASE + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(pipe)  _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define _MIPIA_DEVICE_RESET_TIMER      (dev_priv->mipi_mmio_base + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER      (dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(tc)    _TRANSCODER(tc, \
+                       _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
 #define  DEVICE_RESET_TIMER_MASK                       0xffff
 
-#define _MIPIA_DPI_RESOLUTION                  (VLV_DISPLAY_BASE + 0xb020)
-#define _MIPIB_DPI_RESOLUTION                  (VLV_DISPLAY_BASE + 0xb820)
-#define MIPI_DPI_RESOLUTION(pipe)      _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define _MIPIA_DPI_RESOLUTION          (dev_priv->mipi_mmio_base + 0xb020)
+#define _MIPIB_DPI_RESOLUTION          (dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(tc)        _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
+                                       _MIPIB_DPI_RESOLUTION)
 #define  VERTICAL_ADDRESS_SHIFT                                16
 #define  VERTICAL_ADDRESS_MASK                         (0xffff << 16)
 #define  HORIZONTAL_ADDRESS_SHIFT                      0
 #define  HORIZONTAL_ADDRESS_MASK                       0xffff
 
-#define _MIPIA_DBI_FIFO_THROTTLE               (VLV_DISPLAY_BASE + 0xb024)
-#define _MIPIB_DBI_FIFO_THROTTLE               (VLV_DISPLAY_BASE + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(pipe)   _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define _MIPIA_DBI_FIFO_THROTTLE       (dev_priv->mipi_mmio_base + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE       (dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(tc)     _TRANSCODER(tc, \
+                       _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
 #define  DBI_FIFO_EMPTY_HALF                           (0 << 0)
 #define  DBI_FIFO_EMPTY_QUARTER                                (1 << 0)
 #define  DBI_FIFO_EMPTY_7_LOCATIONS                    (2 << 0)
 
 /* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT             (VLV_DISPLAY_BASE + 0xb028)
-#define _MIPIB_HSYNC_PADDING_COUNT             (VLV_DISPLAY_BASE + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
-
-#define _MIPIA_HBP_COUNT                       (VLV_DISPLAY_BASE + 0xb02c)
-#define _MIPIB_HBP_COUNT                       (VLV_DISPLAY_BASE + 0xb82c)
-#define MIPI_HBP_COUNT(pipe)           _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
-
-#define _MIPIA_HFP_COUNT                       (VLV_DISPLAY_BASE + 0xb030)
-#define _MIPIB_HFP_COUNT                       (VLV_DISPLAY_BASE + 0xb830)
-#define MIPI_HFP_COUNT(pipe)           _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
-
-#define _MIPIA_HACTIVE_AREA_COUNT              (VLV_DISPLAY_BASE + 0xb034)
-#define _MIPIB_HACTIVE_AREA_COUNT              (VLV_DISPLAY_BASE + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(pipe)  _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
-
-#define _MIPIA_VSYNC_PADDING_COUNT             (VLV_DISPLAY_BASE + 0xb038)
-#define _MIPIB_VSYNC_PADDING_COUNT             (VLV_DISPLAY_BASE + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
-
-#define _MIPIA_VBP_COUNT                       (VLV_DISPLAY_BASE + 0xb03c)
-#define _MIPIB_VBP_COUNT                       (VLV_DISPLAY_BASE + 0xb83c)
-#define MIPI_VBP_COUNT(pipe)           _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
+#define _MIPIA_HSYNC_PADDING_COUNT     (dev_priv->mipi_mmio_base + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT     (dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(tc)   _TRANSCODER(tc, \
+                       _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT               (dev_priv->mipi_mmio_base + 0xb02c)
+#define _MIPIB_HBP_COUNT               (dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(tc)             _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
+                                       _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT               (dev_priv->mipi_mmio_base + 0xb030)
+#define _MIPIB_HFP_COUNT               (dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(tc)             _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
+                                       _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT      (dev_priv->mipi_mmio_base + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT      (dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(tc)    _TRANSCODER(tc, \
+                       _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT     (dev_priv->mipi_mmio_base + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT     (dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(tc)   _TRANSCODER(tc, \
+                       _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT               (dev_priv->mipi_mmio_base + 0xb03c)
+#define _MIPIB_VBP_COUNT               (dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(tc)             _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
+                                       _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT               (dev_priv->mipi_mmio_base + 0xb040)
+#define _MIPIB_VFP_COUNT               (dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(tc)             _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
+                                       _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT   (dev_priv->mipi_mmio_base + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT   (dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
+               _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
 
-#define _MIPIA_VFP_COUNT                       (VLV_DISPLAY_BASE + 0xb040)
-#define _MIPIB_VFP_COUNT                       (VLV_DISPLAY_BASE + 0xb840)
-#define MIPI_VFP_COUNT(pipe)           _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
-
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT           (VLV_DISPLAY_BASE + 0xb044)
-#define _MIPIB_HIGH_LOW_SWITCH_COUNT           (VLV_DISPLAY_BASE + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe)       _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
 /* regs above are bits 15:0 */
 
-#define _MIPIA_DPI_CONTROL                     (VLV_DISPLAY_BASE + 0xb048)
-#define _MIPIB_DPI_CONTROL                     (VLV_DISPLAY_BASE + 0xb848)
-#define MIPI_DPI_CONTROL(pipe)         _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define _MIPIA_DPI_CONTROL             (dev_priv->mipi_mmio_base + 0xb048)
+#define _MIPIB_DPI_CONTROL             (dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(tc)           _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
+                                       _MIPIB_DPI_CONTROL)
 #define  DPI_LP_MODE                                   (1 << 6)
 #define  BACKLIGHT_OFF                                 (1 << 5)
 #define  BACKLIGHT_ON                                  (1 << 4)
@@ -6205,27 +6268,31 @@ enum punit_power_well {
 #define  TURN_ON                                       (1 << 1)
 #define  SHUTDOWN                                      (1 << 0)
 
-#define _MIPIA_DPI_DATA                                (VLV_DISPLAY_BASE + 0xb04c)
-#define _MIPIB_DPI_DATA                                (VLV_DISPLAY_BASE + 0xb84c)
-#define MIPI_DPI_DATA(pipe)            _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define _MIPIA_DPI_DATA                        (dev_priv->mipi_mmio_base + 0xb04c)
+#define _MIPIB_DPI_DATA                        (dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(tc)              _TRANSCODER(tc, _MIPIA_DPI_DATA, \
+                                       _MIPIB_DPI_DATA)
 #define  COMMAND_BYTE_SHIFT                            0
 #define  COMMAND_BYTE_MASK                             (0x3f << 0)
 
-#define _MIPIA_INIT_COUNT                      (VLV_DISPLAY_BASE + 0xb050)
-#define _MIPIB_INIT_COUNT                      (VLV_DISPLAY_BASE + 0xb850)
-#define MIPI_INIT_COUNT(pipe)          _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define _MIPIA_INIT_COUNT              (dev_priv->mipi_mmio_base + 0xb050)
+#define _MIPIB_INIT_COUNT              (dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(tc)            _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
+                                       _MIPIB_INIT_COUNT)
 #define  MASTER_INIT_TIMER_SHIFT                       0
 #define  MASTER_INIT_TIMER_MASK                                (0xffff << 0)
 
-#define _MIPIA_MAX_RETURN_PKT_SIZE             (VLV_DISPLAY_BASE + 0xb054)
-#define _MIPIB_MAX_RETURN_PKT_SIZE             (VLV_DISPLAY_BASE + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define _MIPIA_MAX_RETURN_PKT_SIZE     (dev_priv->mipi_mmio_base + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE     (dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(tc)   _TRANSCODER(tc, \
+                       _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
 #define  MAX_RETURN_PKT_SIZE_SHIFT                     0
 #define  MAX_RETURN_PKT_SIZE_MASK                      (0x3ff << 0)
 
-#define _MIPIA_VIDEO_MODE_FORMAT               (VLV_DISPLAY_BASE + 0xb058)
-#define _MIPIB_VIDEO_MODE_FORMAT               (VLV_DISPLAY_BASE + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(pipe)   _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define _MIPIA_VIDEO_MODE_FORMAT       (dev_priv->mipi_mmio_base + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT       (dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(tc)     _TRANSCODER(tc, \
+                       _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
 #define  RANDOM_DPI_DISPLAY_RESOLUTION                 (1 << 4)
 #define  DISABLE_VIDEO_BTA                             (1 << 3)
 #define  IP_TG_CONFIG                                  (1 << 2)
@@ -6233,9 +6300,10 @@ enum punit_power_well {
 #define  VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS         (2 << 0)
 #define  VIDEO_MODE_BURST                              (3 << 0)
 
-#define _MIPIA_EOT_DISABLE                     (VLV_DISPLAY_BASE + 0xb05c)
-#define _MIPIB_EOT_DISABLE                     (VLV_DISPLAY_BASE + 0xb85c)
-#define MIPI_EOT_DISABLE(pipe)         _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define _MIPIA_EOT_DISABLE             (dev_priv->mipi_mmio_base + 0xb05c)
+#define _MIPIB_EOT_DISABLE             (dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(tc)           _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
+                                       _MIPIB_EOT_DISABLE)
 #define  LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE          (1 << 7)
 #define  HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE          (1 << 6)
 #define  LOW_CONTENTION_RECOVERY_DISABLE               (1 << 5)
@@ -6245,28 +6313,33 @@ enum punit_power_well {
 #define  CLOCKSTOP                                     (1 << 1)
 #define  EOT_DISABLE                                   (1 << 0)
 
-#define _MIPIA_LP_BYTECLK                      (VLV_DISPLAY_BASE + 0xb060)
-#define _MIPIB_LP_BYTECLK                      (VLV_DISPLAY_BASE + 0xb860)
-#define MIPI_LP_BYTECLK(pipe)          _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define _MIPIA_LP_BYTECLK              (dev_priv->mipi_mmio_base + 0xb060)
+#define _MIPIB_LP_BYTECLK              (dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(tc)            _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
+                                       _MIPIB_LP_BYTECLK)
 #define  LP_BYTECLK_SHIFT                              0
 #define  LP_BYTECLK_MASK                               (0xffff << 0)
 
 /* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA                     (VLV_DISPLAY_BASE + 0xb064)
-#define _MIPIB_LP_GEN_DATA                     (VLV_DISPLAY_BASE + 0xb864)
-#define MIPI_LP_GEN_DATA(pipe)         _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+#define _MIPIA_LP_GEN_DATA             (dev_priv->mipi_mmio_base + 0xb064)
+#define _MIPIB_LP_GEN_DATA             (dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(tc)           _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
+                                       _MIPIB_LP_GEN_DATA)
 
 /* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA                     (VLV_DISPLAY_BASE + 0xb068)
-#define _MIPIB_HS_GEN_DATA                     (VLV_DISPLAY_BASE + 0xb868)
-#define MIPI_HS_GEN_DATA(pipe)         _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
-
-#define _MIPIA_LP_GEN_CTRL                     (VLV_DISPLAY_BASE + 0xb06c)
-#define _MIPIB_LP_GEN_CTRL                     (VLV_DISPLAY_BASE + 0xb86c)
-#define MIPI_LP_GEN_CTRL(pipe)         _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL                     (VLV_DISPLAY_BASE + 0xb070)
-#define _MIPIB_HS_GEN_CTRL                     (VLV_DISPLAY_BASE + 0xb870)
-#define MIPI_HS_GEN_CTRL(pipe)         _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define _MIPIA_HS_GEN_DATA             (dev_priv->mipi_mmio_base + 0xb068)
+#define _MIPIB_HS_GEN_DATA             (dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(tc)           _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
+                                       _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL             (dev_priv->mipi_mmio_base + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL             (dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(tc)           _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
+                                       _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL             (dev_priv->mipi_mmio_base + 0xb070)
+#define _MIPIB_HS_GEN_CTRL             (dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(tc)           _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
+                                       _MIPIB_HS_GEN_CTRL)
 #define  LONG_PACKET_WORD_COUNT_SHIFT                  8
 #define  LONG_PACKET_WORD_COUNT_MASK                   (0xffff << 8)
 #define  SHORT_PACKET_PARAM_SHIFT                      8
@@ -6277,9 +6350,10 @@ enum punit_power_well {
 #define  DATA_TYPE_MASK                                        (0x3f << 0)
 /* data type values, see include/video/mipi_display.h */
 
-#define _MIPIA_GEN_FIFO_STAT                   (VLV_DISPLAY_BASE + 0xb074)
-#define _MIPIB_GEN_FIFO_STAT                   (VLV_DISPLAY_BASE + 0xb874)
-#define MIPI_GEN_FIFO_STAT(pipe)       _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define _MIPIA_GEN_FIFO_STAT           (dev_priv->mipi_mmio_base + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT           (dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
+                                       _MIPIB_GEN_FIFO_STAT)
 #define  DPI_FIFO_EMPTY                                        (1 << 28)
 #define  DBI_FIFO_EMPTY                                        (1 << 27)
 #define  LP_CTRL_FIFO_EMPTY                            (1 << 26)
@@ -6295,16 +6369,18 @@ enum punit_power_well {
 #define  HS_DATA_FIFO_HALF_EMPTY                       (1 << 1)
 #define  HS_DATA_FIFO_FULL                             (1 << 0)
 
-#define _MIPIA_HS_LS_DBI_ENABLE                        (VLV_DISPLAY_BASE + 0xb078)
-#define _MIPIB_HS_LS_DBI_ENABLE                        (VLV_DISPLAY_BASE + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(pipe)    _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define _MIPIA_HS_LS_DBI_ENABLE                (dev_priv->mipi_mmio_base + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE                (dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(tc)      _TRANSCODER(tc, \
+                       _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
 #define  DBI_HS_LP_MODE_MASK                           (1 << 0)
 #define  DBI_LP_MODE                                   (1 << 0)
 #define  DBI_HS_MODE                                   (0 << 0)
 
-#define _MIPIA_DPHY_PARAM                      (VLV_DISPLAY_BASE + 0xb080)
-#define _MIPIB_DPHY_PARAM                      (VLV_DISPLAY_BASE + 0xb880)
-#define MIPI_DPHY_PARAM(pipe)          _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define _MIPIA_DPHY_PARAM              (dev_priv->mipi_mmio_base + 0xb080)
+#define _MIPIB_DPHY_PARAM              (dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(tc)            _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
+                                       _MIPIB_DPHY_PARAM)
 #define  EXIT_ZERO_COUNT_SHIFT                         24
 #define  EXIT_ZERO_COUNT_MASK                          (0x3f << 24)
 #define  TRAIL_COUNT_SHIFT                             16
@@ -6315,34 +6391,41 @@ enum punit_power_well {
 #define  PREPARE_COUNT_MASK                            (0x3f << 0)
 
 /* bits 31:0 */
-#define _MIPIA_DBI_BW_CTRL                     (VLV_DISPLAY_BASE + 0xb084)
-#define _MIPIB_DBI_BW_CTRL                     (VLV_DISPLAY_BASE + 0xb884)
-#define MIPI_DBI_BW_CTRL(pipe)         _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
-
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT                (VLV_DISPLAY_BASE + 0xb088)
-#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT                (VLV_DISPLAY_BASE + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe)    _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define _MIPIA_DBI_BW_CTRL             (dev_priv->mipi_mmio_base + 0xb084)
+#define _MIPIB_DBI_BW_CTRL             (dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(tc)           _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
+                                       _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT                (dev_priv->mipi_mmio_base \
+                                                       + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT                (dev_priv->mipi_mmio_base \
+                                                       + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc)      _TRANSCODER(tc, \
+       _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
 #define  LP_HS_SSW_CNT_SHIFT                           16
 #define  LP_HS_SSW_CNT_MASK                            (0xffff << 16)
 #define  HS_LP_PWR_SW_CNT_SHIFT                                0
 #define  HS_LP_PWR_SW_CNT_MASK                         (0xffff << 0)
 
-#define _MIPIA_STOP_STATE_STALL                        (VLV_DISPLAY_BASE + 0xb08c)
-#define _MIPIB_STOP_STATE_STALL                        (VLV_DISPLAY_BASE + 0xb88c)
-#define MIPI_STOP_STATE_STALL(pipe)    _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define _MIPIA_STOP_STATE_STALL                (dev_priv->mipi_mmio_base + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL                (dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(tc)      _TRANSCODER(tc, \
+                       _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
 #define  STOP_STATE_STALL_COUNTER_SHIFT                        0
 #define  STOP_STATE_STALL_COUNTER_MASK                 (0xff << 0)
 
-#define _MIPIA_INTR_STAT_REG_1                 (VLV_DISPLAY_BASE + 0xb090)
-#define _MIPIB_INTR_STAT_REG_1                 (VLV_DISPLAY_BASE + 0xb890)
-#define MIPI_INTR_STAT_REG_1(pipe)     _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1                   (VLV_DISPLAY_BASE + 0xb094)
-#define _MIPIB_INTR_EN_REG_1                   (VLV_DISPLAY_BASE + 0xb894)
-#define MIPI_INTR_EN_REG_1(pipe)       _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define _MIPIA_INTR_STAT_REG_1         (dev_priv->mipi_mmio_base + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1         (dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(tc)       _TRANSCODER(tc, \
+                               _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1           (dev_priv->mipi_mmio_base + 0xb094)
+#define _MIPIB_INTR_EN_REG_1           (dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
+                                       _MIPIB_INTR_EN_REG_1)
 #define  RX_CONTENTION_DETECTED                                (1 << 0)
 
 /* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL                   (VLV_DISPLAY_BASE + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL           (dev_priv->mipi_mmio_base + 0xb100)
 #define  DBI_TYPEC_ENABLE                              (1 << 31)
 #define  DBI_TYPEC_WIP                                 (1 << 30)
 #define  DBI_TYPEC_OPTION_SHIFT                                28
@@ -6356,9 +6439,10 @@ enum punit_power_well {
 
 /* MIPI adapter registers */
 
-#define _MIPIA_CTRL                            (VLV_DISPLAY_BASE + 0xb104)
-#define _MIPIB_CTRL                            (VLV_DISPLAY_BASE + 0xb904)
-#define MIPI_CTRL(pipe)                        _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define _MIPIA_CTRL                    (dev_priv->mipi_mmio_base + 0xb104)
+#define _MIPIB_CTRL                    (dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(tc)                  _TRANSCODER(tc, _MIPIA_CTRL, \
+                                       _MIPIB_CTRL)
 #define  ESCAPE_CLOCK_DIVIDER_SHIFT                    5 /* A only */
 #define  ESCAPE_CLOCK_DIVIDER_MASK                     (3 << 5)
 #define  ESCAPE_CLOCK_DIVIDER_1                                (0 << 5)
@@ -6370,50 +6454,52 @@ enum punit_power_well {
 #define  READ_REQUEST_PRIORITY_HIGH                    (3 << 3)
 #define  RGB_FLIP_TO_BGR                               (1 << 2)
 
-#define _MIPIA_DATA_ADDRESS                    (VLV_DISPLAY_BASE + 0xb108)
-#define _MIPIB_DATA_ADDRESS                    (VLV_DISPLAY_BASE + 0xb908)
-#define MIPI_DATA_ADDRESS(pipe)                _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define _MIPIA_DATA_ADDRESS            (dev_priv->mipi_mmio_base + 0xb108)
+#define _MIPIB_DATA_ADDRESS            (dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(tc)          _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
+                                       _MIPIB_DATA_ADDRESS)
 #define  DATA_MEM_ADDRESS_SHIFT                                5
 #define  DATA_MEM_ADDRESS_MASK                         (0x7ffffff << 5)
 #define  DATA_VALID                                    (1 << 0)
 
-#define _MIPIA_DATA_LENGTH                     (VLV_DISPLAY_BASE + 0xb10c)
-#define _MIPIB_DATA_LENGTH                     (VLV_DISPLAY_BASE + 0xb90c)
-#define MIPI_DATA_LENGTH(pipe)         _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define _MIPIA_DATA_LENGTH             (dev_priv->mipi_mmio_base + 0xb10c)
+#define _MIPIB_DATA_LENGTH             (dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(tc)           _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
+                                       _MIPIB_DATA_LENGTH)
 #define  DATA_LENGTH_SHIFT                             0
 #define  DATA_LENGTH_MASK                              (0xfffff << 0)
 
-#define _MIPIA_COMMAND_ADDRESS                 (VLV_DISPLAY_BASE + 0xb110)
-#define _MIPIB_COMMAND_ADDRESS                 (VLV_DISPLAY_BASE + 0xb910)
-#define MIPI_COMMAND_ADDRESS(pipe)     _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define _MIPIA_COMMAND_ADDRESS         (dev_priv->mipi_mmio_base + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS         (dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(tc)       _TRANSCODER(tc, \
+                               _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
 #define  COMMAND_MEM_ADDRESS_SHIFT                     5
 #define  COMMAND_MEM_ADDRESS_MASK                      (0x7ffffff << 5)
 #define  AUTO_PWG_ENABLE                               (1 << 2)
 #define  MEMORY_WRITE_DATA_FROM_PIPE_RENDERING         (1 << 1)
 #define  COMMAND_VALID                                 (1 << 0)
 
-#define _MIPIA_COMMAND_LENGTH                  (VLV_DISPLAY_BASE + 0xb114)
-#define _MIPIB_COMMAND_LENGTH                  (VLV_DISPLAY_BASE + 0xb914)
-#define MIPI_COMMAND_LENGTH(pipe)      _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define _MIPIA_COMMAND_LENGTH          (dev_priv->mipi_mmio_base + 0xb114)
+#define _MIPIB_COMMAND_LENGTH          (dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(tc)        _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
+                                       _MIPIB_COMMAND_LENGTH)
 #define  COMMAND_LENGTH_SHIFT(n)                       (8 * (n)) /* n: 0...3 */
 #define  COMMAND_LENGTH_MASK(n)                                (0xff << (8 * (n)))
 
-#define _MIPIA_READ_DATA_RETURN0               (VLV_DISPLAY_BASE + 0xb118)
-#define _MIPIB_READ_DATA_RETURN0               (VLV_DISPLAY_BASE + 0xb918)
-#define MIPI_READ_DATA_RETURN(pipe, n) \
-       (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+#define _MIPIA_READ_DATA_RETURN0       (dev_priv->mipi_mmio_base + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0       (dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(tc, n) \
+       (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+                                       + 4 * (n)) /* n: 0...7 */
 
-#define _MIPIA_READ_DATA_VALID                 (VLV_DISPLAY_BASE + 0xb138)
-#define _MIPIB_READ_DATA_VALID                 (VLV_DISPLAY_BASE + 0xb938)
-#define MIPI_READ_DATA_VALID(pipe)     _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define _MIPIA_READ_DATA_VALID         (dev_priv->mipi_mmio_base + 0xb138)
+#define _MIPIB_READ_DATA_VALID         (dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(tc)       _TRANSCODER(tc, \
+                               _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
 #define  READ_DATA_VALID(n)                            (1 << (n))
 
 /* For UMS only (deprecated): */
 #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
 #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
 
 #endif /* _I915_REG_H_ */
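
The register hunks above retarget every MIPI/DSI macro from the fixed VLV_DISPLAY_BASE to the per-platform dev_priv->mipi_mmio_base and index them by DSI transcoder (tc) rather than pipe. As a rough illustration of the pattern (the real _TRANSCODER helper is defined elsewhere in i915_reg.h; the interpolating definition below is an assumption, mirroring the existing _PIPE helper):

/* Sketch only: assumed shape of the A/B selector used by the macros above. */
#define _TRANSCODER(tran, a, b)                ((a) + (tran) * ((b) - (a)))

/* e.g. with dev_priv->mipi_mmio_base == VLV_DISPLAY_BASE on Valleyview:
 *   MIPI_DPI_CONTROL(0) -> mipi_mmio_base + 0xb048   (MIPI A)
 *   MIPI_DPI_CONTROL(1) -> mipi_mmio_base + 0xb848   (MIPI B)
 */
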
index b17b9c7c769f92eca6497a938db2f40310cd0b10..ded60139820ef1028f4fdbf24aef7480a2b018cb 100644 (file)
@@ -76,12 +76,12 @@ static const u32 bdw_ddi_translations_edp[] = {
        0x00FFFFFF, 0x00000012,         /* eDP parameters */
        0x00EBAFFF, 0x00020011,
        0x00C71FFF, 0x0006000F,
+       0x00AAAFFF, 0x000E000A,
        0x00FFFFFF, 0x00020011,
        0x00DB6FFF, 0x0005000F,
        0x00BEEFFF, 0x000A000C,
        0x00FFFFFF, 0x0005000F,
        0x00DB6FFF, 0x000A000C,
-       0x00FFFFFF, 0x000A000C,
        0x00FFFFFF, 0x00140006          /* HDMI parameters 800mV 0dB*/
 };
 
@@ -89,12 +89,12 @@ static const u32 bdw_ddi_translations_dp[] = {
        0x00FFFFFF, 0x0007000E,         /* DP parameters */
        0x00D75FFF, 0x000E000A,
        0x00BEFFFF, 0x00140006,
+       0x80B2CFFF, 0x001B0002,
        0x00FFFFFF, 0x000E000A,
        0x00D75FFF, 0x00180004,
        0x80CB2FFF, 0x001B0002,
        0x00F7DFFF, 0x00180004,
        0x80D75FFF, 0x001B0002,
-       0x80FFFFFF, 0x001B0002,
        0x00FFFFFF, 0x00140006          /* HDMI parameters 800mV 0dB*/
 };
 
index 1112d9ecc226c480252183866e2eb9268514531c..927d2476f60a7adf8c60c35caa600574c04731e5 100644 (file)
 #include "i915_trace.h"
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
 #include <linux/dma_remapping.h>
 
+/* Primary plane formats supported by all gen */
+#define COMMON_PRIMARY_FORMATS \
+       DRM_FORMAT_C8, \
+       DRM_FORMAT_RGB565, \
+       DRM_FORMAT_XRGB8888, \
+       DRM_FORMAT_ARGB8888
+
+/* Primary plane formats for gen <= 3 */
+static const uint32_t intel_primary_formats_gen2[] = {
+       COMMON_PRIMARY_FORMATS,
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_ARGB1555,
+};
+
+/* Primary plane formats for gen >= 4 */
+static const uint32_t intel_primary_formats_gen4[] = {
+       COMMON_PRIMARY_FORMATS, \
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_ARGB2101010,
+       DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_ABGR2101010,
+};
+
+/* Cursor formats */
+static const uint32_t intel_cursor_formats[] = {
+       DRM_FORMAT_ARGB8888,
+};
+
 #define DIV_ROUND_CLOSEST_ULL(ll, d)   \
-       ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
 
-static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_device *dev,
+                                   enum pipe pipe);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -1712,6 +1745,17 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
 
+       /* disable left/right clock distribution */
+       if (pipe != PIPE_B) {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+       } else {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+       }
+
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1749,6 +1793,9 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
+       if (WARN_ON(pll == NULL))
+               return;
+
        WARN_ON(!pll->refcount);
        if (pll->active == 0) {
                DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
@@ -2314,6 +2361,7 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
                goto out_unref_obj;
        }
 
+       obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
        mutex_unlock(&dev->struct_mutex);
 
        DRM_DEBUG_KMS("plane fb obj %p\n", obj);
@@ -2359,6 +2407,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
                if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
                        drm_framebuffer_reference(c->primary->fb);
                        intel_crtc->base.primary->fb = c->primary->fb;
+                       fb->obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
                        break;
                }
        }
@@ -2546,7 +2595,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        if (dev_priv->display.disable_fbc)
                dev_priv->display.disable_fbc(dev);
-       intel_increase_pllclock(crtc);
+       intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
 
        dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -2647,7 +2696,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
        struct drm_framebuffer *old_fb;
+       struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
        int ret;
 
        if (intel_crtc_has_pending_flip(crtc)) {
@@ -2668,10 +2719,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return -EINVAL;
        }
 
+       old_fb = crtc->primary->fb;
+
        mutex_lock(&dev->struct_mutex);
-       ret = intel_pin_and_fence_fb_obj(dev,
-                                        to_intel_framebuffer(fb)->obj,
-                                        NULL);
+       ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+       if (ret == 0)
+               i915_gem_track_fb(to_intel_framebuffer(old_fb)->obj, obj,
+                                 INTEL_FRONTBUFFER_PRIMARY(pipe));
        mutex_unlock(&dev->struct_mutex);
        if (ret != 0) {
                DRM_ERROR("pin & fence failed\n");
@@ -2711,7 +2765,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
        dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
-       old_fb = crtc->primary->fb;
+       if (intel_crtc->active)
+               intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
        crtc->primary->fb = fb;
        crtc->x = x;
        crtc->y = y;
@@ -2726,7 +2782,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
-       intel_edp_psr_update(dev);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -3892,6 +3947,8 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
 
+       drm_vblank_on(dev, pipe);
+
        intel_enable_primary_hw_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        /* The fixup needs to happen before cursor is enabled */
@@ -3904,8 +3961,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 
        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
-       intel_edp_psr_update(dev);
        mutex_unlock(&dev->struct_mutex);
+
+       /*
+        * FIXME: Once we grow proper nuclear flip support out of this we need
+        * to compute the mask of flip planes precisely. For the time being
+        * consider this a flip from a NULL plane.
+        */
+       intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
 }
 
 static void intel_crtc_disable_planes(struct drm_crtc *crtc)
@@ -3917,7 +3980,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
        int plane = intel_crtc->plane;
 
        intel_crtc_wait_for_pending_flips(crtc);
-       drm_crtc_vblank_off(crtc);
 
        if (dev_priv->fbc.plane == plane)
                intel_disable_fbc(dev);
@@ -3928,6 +3990,15 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
        intel_crtc_update_cursor(crtc, false);
        intel_disable_planes(crtc);
        intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+
+       /*
+        * FIXME: Once we grow proper nuclear flip support out of this we need
+        * to compute the mask of flip planes precisely. For the time being
+        * consider this a flip to a NULL plane.
+        */
+       intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+
+       drm_vblank_off(dev, pipe);
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4006,8 +4077,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                cpt_verify_modeset(dev, intel_crtc->pipe);
 
        intel_crtc_enable_planes(crtc);
-
-       drm_crtc_vblank_on(crtc);
 }
 
 /* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4121,8 +4190,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
         * to change the workaround. */
        haswell_mode_set_planes_workaround(intel_crtc);
        intel_crtc_enable_planes(crtc);
-
-       drm_crtc_vblank_on(crtc);
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -4200,7 +4267,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 
        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
-       intel_edp_psr_update(dev);
        mutex_unlock(&dev->struct_mutex);
 }
 
@@ -4248,7 +4314,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
-       intel_edp_psr_update(dev);
        mutex_unlock(&dev->struct_mutex);
 }
 
@@ -4633,8 +4698,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
        intel_crtc_enable_planes(crtc);
 
-       drm_crtc_vblank_on(crtc);
-
        /* Underruns don't raise interrupts, so check manually. */
        i9xx_check_fifo_underruns(dev);
 }
@@ -4727,8 +4790,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        if (IS_GEN2(dev))
                intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
 
-       drm_crtc_vblank_on(crtc);
-
        /* Underruns don't raise interrupts, so check manually. */
        i9xx_check_fifo_underruns(dev);
 }
@@ -4805,7 +4866,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 
        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
-       intel_edp_psr_update(dev);
        mutex_unlock(&dev->struct_mutex);
 }
 
@@ -4850,16 +4910,43 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *intel_encoder;
+       enum intel_display_power_domain domain;
+       unsigned long domains;
        bool enable = false;
 
        for_each_encoder_on_crtc(dev, crtc, intel_encoder)
                enable |= intel_encoder->connectors_active;
 
-       if (enable)
-               dev_priv->display.crtc_enable(crtc);
-       else
-               dev_priv->display.crtc_disable(crtc);
+       if (enable) {
+               if (!intel_crtc->active) {
+                       /*
+                        * FIXME: DDI plls and relevant code isn't converted
+                        * yet, so do runtime PM for DPMS only for all other
+                        * platforms for now.
+                        */
+                       if (!HAS_DDI(dev)) {
+                               domains = get_crtc_power_domains(crtc);
+                               for_each_power_domain(domain, domains)
+                                       intel_display_power_get(dev_priv, domain);
+                               intel_crtc->enabled_power_domains = domains;
+                       }
+
+                       dev_priv->display.crtc_enable(crtc);
+               }
+       } else {
+               if (intel_crtc->active) {
+                       dev_priv->display.crtc_disable(crtc);
+
+                       if (!HAS_DDI(dev)) {
+                               domains = intel_crtc->enabled_power_domains;
+                               for_each_power_domain(domain, domains)
+                                       intel_display_power_put(dev_priv, domain);
+                               intel_crtc->enabled_power_domains = 0;
+                       }
+               }
+       }
 
        intel_crtc_update_sarea(crtc, enable);
 }
@@ -4869,6 +4956,8 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_connector *connector;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *old_obj;
+       enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
        /* crtc should still be enabled when we disable it. */
        WARN_ON(!crtc->enabled);
@@ -4878,12 +4967,15 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
        dev_priv->display.off(crtc);
 
        assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
-       assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
-       assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
+       assert_cursor_disabled(dev_priv, pipe);
+       assert_pipe_disabled(dev->dev_private, pipe);
 
        if (crtc->primary->fb) {
+               old_obj = to_intel_framebuffer(crtc->primary->fb)->obj;
                mutex_lock(&dev->struct_mutex);
-               intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
+               intel_unpin_fb_obj(old_obj);
+               i915_gem_track_fb(old_obj, NULL,
+                                 INTEL_FRONTBUFFER_PRIMARY(pipe));
                mutex_unlock(&dev->struct_mutex);
                crtc->primary->fb = NULL;
        }
@@ -7991,8 +8083,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
-       int x = intel_crtc->cursor_x;
-       int y = intel_crtc->cursor_y;
+       int x = crtc->cursor_x;
+       int y = crtc->cursor_y;
        u32 base = 0, pos = 0;
 
        if (on)
@@ -8036,21 +8128,27 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
        intel_crtc->cursor_base = base;
 }
 
-static int intel_crtc_cursor_set(struct drm_crtc *crtc,
-                                struct drm_file *file,
-                                uint32_t handle,
-                                uint32_t width, uint32_t height)
+/*
+ * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
+ *
+ * Note that the object's reference will be consumed if the update fails.  If
+ * the update succeeds, the reference of the old object (if any) will be
+ * consumed.
+ */
+static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
+                                    struct drm_i915_gem_object *obj,
+                                    uint32_t width, uint32_t height)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_i915_gem_object *obj;
+       enum pipe pipe = intel_crtc->pipe;
        unsigned old_width;
        uint32_t addr;
        int ret;
 
        /* if we want to turn off the cursor ignore width and height */
-       if (!handle) {
+       if (!obj) {
                DRM_DEBUG_KMS("cursor off\n");
                addr = 0;
                obj = NULL;
@@ -8066,12 +8164,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
-       if (&obj->base == NULL)
-               return -ENOENT;
-
        if (obj->base.size < width * height * 4) {
-               DRM_DEBUG_KMS("buffer is to small\n");
+               DRM_DEBUG_KMS("buffer is too small\n");
                ret = -ENOMEM;
                goto fail;
        }
@@ -8126,9 +8220,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
        if (intel_crtc->cursor_bo) {
                if (!INTEL_INFO(dev)->cursor_needs_physical)
                        i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
-               drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
        }
 
+       i915_gem_track_fb(intel_crtc->cursor_bo, obj,
+                         INTEL_FRONTBUFFER_CURSOR(pipe));
        mutex_unlock(&dev->struct_mutex);
 
        old_width = intel_crtc->cursor_width;
@@ -8144,6 +8239,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
        }
 
+       intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+
        return 0;
 fail_unpin:
        i915_gem_object_unpin_from_display_plane(obj);
@@ -8154,19 +8251,6 @@ fail:
        return ret;
 }
 
-static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
-       intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
-
-       if (intel_crtc->active)
-               intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-
-       return 0;
-}
-
 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                                 u16 *blue, uint32_t start, uint32_t size)
 {
@@ -8667,12 +8751,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        return mode;
 }
 
-static void intel_increase_pllclock(struct drm_crtc *crtc)
+static void intel_increase_pllclock(struct drm_device *dev,
+                                   enum pipe pipe)
 {
-       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
        int dpll_reg = DPLL(pipe);
        int dpll;
 
@@ -8773,28 +8855,179 @@ out:
        intel_runtime_pm_put(dev_priv);
 }
 
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-                       struct intel_engine_cs *ring)
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+                              unsigned frontbuffer_bits,
+                              struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_crtc *crtc;
+       enum pipe pipe;
 
        if (!i915.powersave)
                return;
 
-       for_each_crtc(dev, crtc) {
-               if (!crtc->primary->fb)
-                       continue;
-
-               if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
+       for_each_pipe(pipe) {
+               if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
                        continue;
 
-               intel_increase_pllclock(crtc);
+               intel_increase_pllclock(dev, pipe);
                if (ring && intel_fbc_enabled(dev))
                        ring->fbc_dirty = true;
        }
 }
 
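
intel_mark_fb_busy() now operates on a frontbuffer bitmask instead of a single GEM object, with a few bits reserved per pipe so each plane can be tracked individually. A minimal sketch of the kind of per-pipe encoding this relies on (the authoritative INTEL_FRONTBUFFER_* macros live in i915_drv.h in this series; the exact layout shown here is an assumption for illustration only):

/* Hypothetical frontbuffer bit layout, 4 bits reserved per pipe. */
#define SKETCH_FRONTBUFFER_BITS_PER_PIPE       4
#define SKETCH_FRONTBUFFER_PRIMARY(pipe)       (1 << (SKETCH_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define SKETCH_FRONTBUFFER_CURSOR(pipe)        (2 << (SKETCH_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define SKETCH_FRONTBUFFER_SPRITE(pipe)        (4 << (SKETCH_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define SKETCH_FRONTBUFFER_ALL_MASK(pipe)      (0xf << (SKETCH_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
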
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                            struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (!obj->frontbuffer_bits)
+               return;
+
+       if (ring) {
+               mutex_lock(&dev_priv->fb_tracking.lock);
+               dev_priv->fb_tracking.busy_bits
+                       |= obj->frontbuffer_bits;
+               dev_priv->fb_tracking.flip_bits
+                       &= ~obj->frontbuffer_bits;
+               mutex_unlock(&dev_priv->fb_tracking.lock);
+       }
+
+       intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+       intel_edp_psr_exit(dev);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+                            unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Delay flushing when rings are still busy.*/
+       mutex_lock(&dev_priv->fb_tracking.lock);
+       frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+       mutex_unlock(&dev_priv->fb_tracking.lock);
+
+       intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+       intel_edp_psr_exit(dev);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                       bool retire)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned frontbuffer_bits;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (!obj->frontbuffer_bits)
+               return;
+
+       frontbuffer_bits = obj->frontbuffer_bits;
+
+       if (retire) {
+               mutex_lock(&dev_priv->fb_tracking.lock);
+               /* Filter out new bits since rendering started. */
+               frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+               dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+               mutex_unlock(&dev_priv->fb_tracking.lock);
+       }
+
+       intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. The
+ * actual frontbuffer flushing will be delayed until completion is signalled
+ * with intel_frontbuffer_flip_complete. If an invalidate happens in between,
+ * this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+                                   unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->fb_tracking.lock);
+       dev_priv->fb_tracking.flip_bits
+               |= frontbuffer_bits;
+       mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+                                    unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->fb_tracking.lock);
+       /* Mask any cancelled flips. */
+       frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+       dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+       mutex_unlock(&dev_priv->fb_tracking.lock);
+
+       intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
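
Taken together, these helpers form a small tracking protocol: render paths bracket GPU access with invalidate/flush, while flips bracket the plane update with flip_prepare/flip_complete (or use plain intel_frontbuffer_flip() when no two-step handshake is needed). A simplified, hypothetical sketch of the intended call order (in the driver these hooks are actually invoked from the GEM domain-tracking/retirement paths and the page-flip code, not from single functions like these):

/* Sketch only; the example_* functions are made up for illustration.
 * The obj-based helpers must be called with dev->struct_mutex held. */
static void example_render_to_frontbuffer(struct drm_i915_gem_object *obj,
					  struct intel_engine_cs *ring)
{
	intel_fb_obj_invalidate(obj, ring);	/* rendering starts: disable fbc/psr/drrs caching */
	/* ... emit rendering commands ... */
	intel_fb_obj_flush(obj, true);		/* rendering retired: caching may resume */
}

static void example_page_flip(struct drm_device *dev, enum pipe pipe)
{
	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);

	intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);	/* flip scheduled */
	/* ... program the flip and wait for it to latch ... */
	intel_frontbuffer_flip_complete(dev, frontbuffer_bits);	/* flush on next vblank */
}
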
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -8812,8 +9045,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
                kfree(work);
        }
 
-       intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
-
        drm_crtc_cleanup(crtc);
 
        kfree(intel_crtc);
@@ -8824,6 +9055,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
        struct intel_unpin_work *work =
                container_of(__work, struct intel_unpin_work, work);
        struct drm_device *dev = work->crtc->dev;
+       enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
 
        mutex_lock(&dev->struct_mutex);
        intel_unpin_fb_obj(work->old_fb_obj);
@@ -8833,6 +9065,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
        intel_update_fbc(dev);
        mutex_unlock(&dev->struct_mutex);
 
+       intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
        BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
        atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
 
@@ -9202,6 +9436,147 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        return 0;
 }
 
+static bool use_mmio_flip(struct intel_engine_cs *ring,
+                         struct drm_i915_gem_object *obj)
+{
+       /*
+        * This is not being used for older platforms, because
+        * non-availability of flip done interrupt forces us to use
+        * CS flips. Older platforms derive flip done using some clever
+        * tricks involving the flip_pending status bits and vblank irqs.
+        * So using MMIO flips there would disrupt this mechanism.
+        */
+
+       if (INTEL_INFO(ring->dev)->gen < 5)
+               return false;
+
+       if (i915.use_mmio_flip < 0)
+               return false;
+       else if (i915.use_mmio_flip > 0)
+               return true;
+       else
+               return ring != obj->ring;
+}
+
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_framebuffer *intel_fb =
+               to_intel_framebuffer(intel_crtc->base.primary->fb);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
+       u32 dspcntr;
+       u32 reg;
+
+       intel_mark_page_flip_active(intel_crtc);
+
+       reg = DSPCNTR(intel_crtc->plane);
+       dspcntr = I915_READ(reg);
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               if (obj->tiling_mode != I915_TILING_NONE)
+                       dspcntr |= DISPPLANE_TILED;
+               else
+                       dspcntr &= ~DISPPLANE_TILED;
+       }
+       I915_WRITE(reg, dspcntr);
+
+       I915_WRITE(DSPSURF(intel_crtc->plane),
+                  intel_crtc->unpin_work->gtt_offset);
+       POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
+static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+{
+       struct intel_engine_cs *ring;
+       int ret;
+
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+       if (!obj->last_write_seqno)
+               return 0;
+
+       ring = obj->ring;
+
+       if (i915_seqno_passed(ring->get_seqno(ring, true),
+                             obj->last_write_seqno))
+               return 0;
+
+       ret = i915_gem_check_olr(ring, obj->last_write_seqno);
+       if (ret)
+               return ret;
+
+       if (WARN_ON(!ring->irq_get(ring)))
+               return 0;
+
+       return 1;
+}
+
+void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+{
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       struct intel_crtc *intel_crtc;
+       unsigned long irq_flags;
+       u32 seqno;
+
+       seqno = ring->get_seqno(ring, false);
+
+       spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+       for_each_intel_crtc(ring->dev, intel_crtc) {
+               struct intel_mmio_flip *mmio_flip;
+
+               mmio_flip = &intel_crtc->mmio_flip;
+               if (mmio_flip->seqno == 0)
+                       continue;
+
+               if (ring->id != mmio_flip->ring_id)
+                       continue;
+
+               if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
+                       intel_do_mmio_flip(intel_crtc);
+                       mmio_flip->seqno = 0;
+                       ring->irq_put(ring);
+               }
+       }
+       spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+}
+
+static int intel_queue_mmio_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj,
+                                struct intel_engine_cs *ring,
+                                uint32_t flags)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       unsigned long irq_flags;
+       int ret;
+
+       if (WARN_ON(intel_crtc->mmio_flip.seqno))
+               return -EBUSY;
+
+       ret = intel_postpone_flip(obj);
+       if (ret < 0)
+               return ret;
+       if (ret == 0) {
+               intel_do_mmio_flip(intel_crtc);
+               return 0;
+       }
+
+       spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+       intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+       intel_crtc->mmio_flip.ring_id = obj->ring->id;
+       spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+
+       /*
+        * Double check to catch cases where irq fired before
+        * mmio flip data was ready
+        */
+       intel_notify_mmio_flip(obj->ring);
+       return 0;
+}
+
 static int intel_default_queue_flip(struct drm_device *dev,
                                    struct drm_crtc *crtc,
                                    struct drm_framebuffer *fb,
@@ -9222,6 +9597,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
        struct intel_unpin_work *work;
        struct intel_engine_cs *ring;
        unsigned long flags;
@@ -9290,7 +9666,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
-               work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
+               work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
 
        if (IS_VALLEYVIEW(dev)) {
                ring = &dev_priv->ring[BCS];
@@ -9309,12 +9685,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        work->gtt_offset =
                i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
 
-       ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
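+       /* Flip via MMIO when use_mmio_flip() prefers it over a ring command. */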
+       if (use_mmio_flip(ring, obj))
+               ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
+                                           page_flip_flags);
+       else
+               ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+                               page_flip_flags);
        if (ret)
                goto cleanup_unpin;
 
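+       /* Hand the primary plane's frontbuffer bit over to the new fb object. */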
+       i915_gem_track_fb(work->old_fb_obj, obj,
+                         INTEL_FRONTBUFFER_PRIMARY(pipe));
+
        intel_disable_fbc(dev);
-       intel_mark_fb_busy(obj, NULL);
+       intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
        mutex_unlock(&dev->struct_mutex);
 
        trace_i915_flip_request(intel_crtc->plane, obj);
@@ -9344,7 +9728,7 @@ out_hang:
                intel_crtc_wait_for_pending_flips(crtc);
                ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
                if (ret == 0 && event)
-                       drm_send_vblank_event(dev, intel_crtc->pipe, event);
+                       drm_send_vblank_event(dev, pipe, event);
        }
        return ret;
 }
@@ -10379,10 +10763,13 @@ static int __intel_set_mode(struct drm_crtc *crtc,
         */
        for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
                struct drm_framebuffer *old_fb;
+               struct drm_i915_gem_object *old_obj = NULL;
+               struct drm_i915_gem_object *obj =
+                       to_intel_framebuffer(fb)->obj;
 
                mutex_lock(&dev->struct_mutex);
                ret = intel_pin_and_fence_fb_obj(dev,
-                                                to_intel_framebuffer(fb)->obj,
+                                                obj,
                                                 NULL);
                if (ret != 0) {
                        DRM_ERROR("pin & fence failed\n");
@@ -10390,8 +10777,12 @@ static int __intel_set_mode(struct drm_crtc *crtc,
                        goto done;
                }
                old_fb = crtc->primary->fb;
-               if (old_fb)
-                       intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+               if (old_fb) {
+                       old_obj = to_intel_framebuffer(old_fb)->obj;
+                       intel_unpin_fb_obj(old_obj);
+               }
+               i915_gem_track_fb(old_obj, obj,
+                                 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
                mutex_unlock(&dev->struct_mutex);
 
                crtc->primary->fb = fb;
@@ -10563,12 +10954,17 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
        if (is_crtc_connector_off(set)) {
                config->mode_changed = true;
        } else if (set->crtc->primary->fb != set->fb) {
-               /* If we have no fb then treat it as a full mode set */
+               /*
+                * If we have no fb, we can only flip as long as the crtc is
+                * active, otherwise we need a full mode set.  The crtc may
+                * be active if we've only disabled the primary plane, or
+                * in fastboot situations.
+                */
                if (set->crtc->primary->fb == NULL) {
                        struct intel_crtc *intel_crtc =
                                to_intel_crtc(set->crtc);
 
-                       if (intel_crtc->active && i915.fastboot) {
+                       if (intel_crtc->active) {
                                DRM_DEBUG_KMS("crtc has no fb, will flip\n");
                                config->fb_changed = true;
                        } else {
@@ -10806,10 +11202,24 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
                ret = intel_set_mode(set->crtc, set->mode,
                                     set->x, set->y, set->fb);
        } else if (config->fb_changed) {
+               struct drm_i915_private *dev_priv = dev->dev_private;
+               struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+
                intel_crtc_wait_for_pending_flips(set->crtc);
 
                ret = intel_pipe_set_base(set->crtc,
                                          set->x, set->y, set->fb);
+
+               /*
+                * We need to make sure the primary plane is re-enabled if it
+                * has previously been turned off.
+                */
+               if (!intel_crtc->primary_enabled && ret == 0) {
+                       WARN_ON(!intel_crtc->active);
+                       intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+                                                     intel_crtc->pipe);
+               }
+
                /*
                 * In the fastboot case this may be our only check of the
                 * state after boot.  It would be better to only do it on
@@ -10850,8 +11260,6 @@ out_config:
 }
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
-       .cursor_set = intel_crtc_cursor_set,
-       .cursor_move = intel_crtc_cursor_move,
        .gamma_set = intel_crtc_gamma_set,
        .set_config = intel_crtc_set_config,
        .destroy = intel_crtc_destroy,
@@ -10959,17 +11367,318 @@ static void intel_shared_dpll_init(struct drm_device *dev)
        BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
 }
 
+static int
+intel_primary_plane_disable(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct intel_crtc *intel_crtc;
+
+       if (!plane->fb)
+               return 0;
+
+       BUG_ON(!plane->crtc);
+
+       intel_crtc = to_intel_crtc(plane->crtc);
+
+       /*
+        * Even though we checked plane->fb above, it's still possible that
+        * the primary plane has been implicitly disabled because the crtc
+        * coordinates given weren't visible, or because we detected
+        * that it was 100% covered by a sprite plane.  Or, the CRTC may be
+        * off and we've set a fb, but haven't actually turned on the CRTC yet.
+        * In either case, we need to unpin the FB and let the fb pointer get
+        * updated, but otherwise we don't need to touch the hardware.
+        */
+       if (!intel_crtc->primary_enabled)
+               goto disable_unpin;
+
+       intel_crtc_wait_for_pending_flips(plane->crtc);
+       intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
+                                      intel_plane->pipe);
+disable_unpin:
+       i915_gem_track_fb(to_intel_framebuffer(plane->fb)->obj, NULL,
+                         INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+       intel_unpin_fb_obj(to_intel_framebuffer(plane->fb)->obj);
+       plane->fb = NULL;
+
+       return 0;
+}
+
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                            unsigned int crtc_w, unsigned int crtc_h,
+                            uint32_t src_x, uint32_t src_y,
+                            uint32_t src_w, uint32_t src_h)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct drm_i915_gem_object *obj, *old_obj = NULL;
+       struct drm_rect dest = {
+               /* integer pixels */
+               .x1 = crtc_x,
+               .y1 = crtc_y,
+               .x2 = crtc_x + crtc_w,
+               .y2 = crtc_y + crtc_h,
+       };
+       struct drm_rect src = {
+               /* 16.16 fixed point */
+               .x1 = src_x,
+               .y1 = src_y,
+               .x2 = src_x + src_w,
+               .y2 = src_y + src_h,
+       };
+       const struct drm_rect clip = {
+               /* integer pixels */
+               .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+               .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
+       };
+       bool visible;
+       int ret;
+
+       ret = drm_plane_helper_check_update(plane, crtc, fb,
+                                           &src, &dest, &clip,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           false, true, &visible);
+
+       if (ret)
+               return ret;
+
+       if (plane->fb)
+               old_obj = to_intel_framebuffer(plane->fb)->obj;
+       obj = to_intel_framebuffer(fb)->obj;
+
+       /*
+        * If the CRTC isn't enabled, we're just pinning the framebuffer,
+        * updating the fb pointer, and returning without touching the
+        * hardware.  This allows us to later do a drmModeSetCrtc with fb=-1 to
+        * turn on the display with all planes set up as desired.
+        */
+       if (!crtc->enabled) {
+               /*
+                * If we already called setplane while the crtc was disabled,
+                * we may have an fb pinned; unpin it.
+                */
+               if (plane->fb)
+                       intel_unpin_fb_obj(old_obj);
+
+               i915_gem_track_fb(old_obj, obj,
+                                 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+               /* Pin and return without programming hardware */
+               return intel_pin_and_fence_fb_obj(dev, obj, NULL);
+       }
+
+       intel_crtc_wait_for_pending_flips(crtc);
+
+       /*
+        * If clipping results in a non-visible primary plane, we'll disable
+        * the primary plane.  Note that this is a bit different than what
+        * happens if userspace explicitly disables the plane by passing fb=0
+        * because plane->fb still gets set and pinned.
+        */
+       if (!visible) {
+               /*
+                * Try to pin the new fb first so that we can bail out if we
+                * fail.
+                */
+               if (plane->fb != fb) {
+                       ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+                       if (ret)
+                               return ret;
+               }
+
+               i915_gem_track_fb(old_obj, obj,
+                                 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+               if (intel_crtc->primary_enabled)
+                       intel_disable_primary_hw_plane(dev_priv,
+                                                      intel_plane->plane,
+                                                      intel_plane->pipe);
+
+               if (plane->fb != fb)
+                       if (plane->fb)
+                               intel_unpin_fb_obj(old_obj);
+
+               return 0;
+       }
+
+       ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
+       if (ret)
+               return ret;
+
+       if (!intel_crtc->primary_enabled)
+               intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+                                             intel_crtc->pipe);
+
+       return 0;
+}
+
+/* Common destruction function for both primary and cursor planes */
+static void intel_plane_destroy(struct drm_plane *plane)
+{
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       drm_plane_cleanup(plane);
+       kfree(intel_plane);
+}
+
+static const struct drm_plane_funcs intel_primary_plane_funcs = {
+       .update_plane = intel_primary_plane_setplane,
+       .disable_plane = intel_primary_plane_disable,
+       .destroy = intel_plane_destroy,
+};
+
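+/*
+ * Create the universal primary plane for a pipe.  On gen2/3 with FBC the
+ * plane<->pipe mapping is crossed so that plane A (the only FBC capable
+ * plane) ends up feeding pipe B, matching the CRTC setup below.
+ */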
+static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
+                                                   int pipe)
+{
+       struct intel_plane *primary;
+       const uint32_t *intel_primary_formats;
+       int num_formats;
+
+       primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+       if (primary == NULL)
+               return NULL;
+
+       primary->can_scale = false;
+       primary->max_downscale = 1;
+       primary->pipe = pipe;
+       primary->plane = pipe;
+       if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
+               primary->plane = !pipe;
+
+       if (INTEL_INFO(dev)->gen <= 3) {
+               intel_primary_formats = intel_primary_formats_gen2;
+               num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
+       } else {
+               intel_primary_formats = intel_primary_formats_gen4;
+               num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
+       }
+
+       drm_universal_plane_init(dev, &primary->base, 0,
+                                &intel_primary_plane_funcs,
+                                intel_primary_formats, num_formats,
+                                DRM_PLANE_TYPE_PRIMARY);
+       return &primary->base;
+}
+
+static int
+intel_cursor_plane_disable(struct drm_plane *plane)
+{
+       if (!plane->fb)
+               return 0;
+
+       BUG_ON(!plane->crtc);
+
+       return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
+}
+
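+/*
+ * Universal plane update hook for the cursor: validate the request with the
+ * plane helper (no scaling allowed), then either hand the new buffer to the
+ * legacy cursor_set path or just move the already-set cursor.
+ */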
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+                         struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                         unsigned int crtc_w, unsigned int crtc_h,
+                         uint32_t src_x, uint32_t src_y,
+                         uint32_t src_w, uint32_t src_h)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_rect dest = {
+               /* integer pixels */
+               .x1 = crtc_x,
+               .y1 = crtc_y,
+               .x2 = crtc_x + crtc_w,
+               .y2 = crtc_y + crtc_h,
+       };
+       struct drm_rect src = {
+               /* 16.16 fixed point */
+               .x1 = src_x,
+               .y1 = src_y,
+               .x2 = src_x + src_w,
+               .y2 = src_y + src_h,
+       };
+       const struct drm_rect clip = {
+               /* integer pixels */
+               .x2 = intel_crtc->config.pipe_src_w,
+               .y2 = intel_crtc->config.pipe_src_h,
+       };
+       bool visible;
+       int ret;
+
+       ret = drm_plane_helper_check_update(plane, crtc, fb,
+                                           &src, &dest, &clip,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           true, true, &visible);
+       if (ret)
+               return ret;
+
+       crtc->cursor_x = crtc_x;
+       crtc->cursor_y = crtc_y;
+       if (fb != crtc->cursor->fb) {
+               return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
+       } else {
+               intel_crtc_update_cursor(crtc, visible);
+               return 0;
+       }
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+       .update_plane = intel_cursor_plane_update,
+       .disable_plane = intel_cursor_plane_disable,
+       .destroy = intel_plane_destroy,
+};
+
+static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
+                                                  int pipe)
+{
+       struct intel_plane *cursor;
+
+       cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
+       if (cursor == NULL)
+               return NULL;
+
+       cursor->can_scale = false;
+       cursor->max_downscale = 1;
+       cursor->pipe = pipe;
+       cursor->plane = pipe;
+
+       drm_universal_plane_init(dev, &cursor->base, 0,
+                                &intel_cursor_plane_funcs,
+                                intel_cursor_formats,
+                                ARRAY_SIZE(intel_cursor_formats),
+                                DRM_PLANE_TYPE_CURSOR);
+       return &cursor->base;
+}
+
 static void intel_crtc_init(struct drm_device *dev, int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc;
-       int i;
+       struct drm_plane *primary = NULL;
+       struct drm_plane *cursor = NULL;
+       int i, ret;
 
        intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
        if (intel_crtc == NULL)
                return;
 
-       drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+       primary = intel_primary_plane_create(dev, pipe);
+       if (!primary)
+               goto fail;
+
+       cursor = intel_cursor_plane_create(dev, pipe);
+       if (!cursor)
+               goto fail;
+
+       ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
+                                       cursor, &intel_crtc_funcs);
+       if (ret)
+               goto fail;
 
        drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
        for (i = 0; i < 256; i++) {
@@ -10980,7 +11689,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 
        /*
         * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
-        * is hooked to plane B. Hence we want plane A feeding pipe B.
+        * is hooked to pipe B. Hence we want plane A feeding pipe B.
         */
        intel_crtc->pipe = pipe;
        intel_crtc->plane = pipe;
@@ -11002,6 +11711,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+       return;
+
+fail:
+       if (primary)
+               drm_plane_cleanup(primary);
+       if (cursor)
+               drm_plane_cleanup(cursor);
+       kfree(intel_crtc);
 }
 
 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -11236,6 +11953,8 @@ static void intel_setup_outputs(struct drm_device *dev)
        if (SUPPORTS_TV(dev))
                intel_tv_init(dev);
 
+       intel_edp_psr_init(dev);
+
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                encoder->base.possible_crtcs = encoder->crtc_mask;
                encoder->base.possible_clones =
@@ -11249,11 +11968,14 @@ static void intel_setup_outputs(struct drm_device *dev)
 
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
+       struct drm_device *dev = fb->dev;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
        drm_framebuffer_cleanup(fb);
+       mutex_lock(&dev->struct_mutex);
        WARN_ON(!intel_fb->obj->framebuffer_references--);
-       drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
+       drm_gem_object_unreference(&intel_fb->obj->base);
+       mutex_unlock(&dev->struct_mutex);
        kfree(intel_fb);
 }
 
@@ -12266,7 +12988,6 @@ void intel_connector_unregister(struct intel_connector *intel_connector)
 void intel_modeset_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc;
        struct drm_connector *connector;
 
        /*
@@ -12286,14 +13007,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        intel_unregister_dsm_handler();
 
-       for_each_crtc(dev, crtc) {
-               /* Skip inactive CRTCs */
-               if (!crtc->primary->fb)
-                       continue;
-
-               intel_increase_pllclock(crtc);
-       }
-
        intel_disable_fbc(dev);
 
        intel_disable_gt_powersave(dev);
index 99f033f6918978df13e4d832286d9d987d2a79db..b5ec48913b4711066902003efa959d322659978c 100644 (file)
@@ -1613,11 +1613,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        }
 }
 
-static bool is_edp_psr(struct drm_device *dev)
+static bool is_edp_psr(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       return dev_priv->psr.sink_support;
+       return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
 }
 
 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
@@ -1665,7 +1663,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_vsc_psr psr_vsc;
 
-       if (intel_dp->psr_setup_done)
+       if (dev_priv->psr.setup_done)
                return;
 
        /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
@@ -1680,21 +1678,26 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
        I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
                   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
-       intel_dp->psr_setup_done = true;
+       dev_priv->psr.setup_done = true;
 }
 
 static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t aux_clock_divider;
        int precharge = 0x3;
        int msg_size = 5;       /* Header(4) + Message(1) */
+       bool only_standby = false;
 
        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
+       if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+               only_standby = true;
+
        /* Enable PSR in sink */
-       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
        else
@@ -1713,18 +1716,24 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
 
 static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t max_sleep_time = 0x1f;
        uint32_t idle_frames = 1;
        uint32_t val = 0x0;
        const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+       bool only_standby = false;
+
+       if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+               only_standby = true;
 
-       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
                val |= EDP_PSR_LINK_STANDBY;
                val |= EDP_PSR_TP2_TP3_TIME_0us;
                val |= EDP_PSR_TP1_TIME_0us;
                val |= EDP_PSR_SKIP_AUX_EXIT;
+               val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
        } else
                val |= EDP_PSR_LINK_DISABLE;
 
@@ -1752,8 +1761,8 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
                return false;
        }
 
-       if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
-           (dig_port->port != PORT_A)) {
+       if (IS_HASWELL(dev) && (intel_encoder->type != INTEL_OUTPUT_EDP ||
+                               dig_port->port != PORT_A)) {
                DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
                return false;
        }
@@ -1782,6 +1791,10 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
                return false;
        }
 
+       /* The limitations below don't apply to Broadwell */
+       if (IS_BROADWELL(dev))
+               goto out;
+
        if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
                DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
                return false;
@@ -1798,34 +1811,48 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
                return false;
        }
 
+ out:
        dev_priv->psr.source_ok = true;
        return true;
 }
 
 static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!intel_edp_psr_match_conditions(intel_dp) ||
-           intel_edp_is_psr_enabled(dev))
+       if (intel_edp_is_psr_enabled(dev))
                return;
 
-       /* Setup PSR once */
-       intel_edp_psr_setup(intel_dp);
-
        /* Enable PSR on the panel */
        intel_edp_psr_enable_sink(intel_dp);
 
        /* Enable PSR on the host */
        intel_edp_psr_enable_source(intel_dp);
+
+       dev_priv->psr.enabled = true;
+       dev_priv->psr.active = true;
 }
 
 void intel_edp_psr_enable(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-       if (intel_edp_psr_match_conditions(intel_dp) &&
-           !intel_edp_is_psr_enabled(dev))
+       if (!HAS_PSR(dev)) {
+               DRM_DEBUG_KMS("PSR not supported on this platform\n");
+               return;
+       }
+
+       if (!is_edp_psr(intel_dp)) {
+               DRM_DEBUG_KMS("PSR not supported by this panel\n");
+               return;
+       }
+
+       /* Setup PSR once */
+       intel_edp_psr_setup(intel_dp);
+
+       if (intel_edp_psr_match_conditions(intel_dp))
                intel_edp_psr_do_enable(intel_dp);
 }
 
@@ -1834,7 +1861,7 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!intel_edp_is_psr_enabled(dev))
+       if (!dev_priv->psr.enabled)
                return;
 
        I915_WRITE(EDP_PSR_CTL(dev),
@@ -1844,10 +1871,15 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
        if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
                       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
                DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
+       dev_priv->psr.enabled = false;
 }
 
-void intel_edp_psr_update(struct drm_device *dev)
+static void intel_edp_psr_work(struct work_struct *work)
 {
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), psr.work.work);
+       struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;
        struct intel_dp *intel_dp = NULL;
 
@@ -1855,17 +1887,52 @@ void intel_edp_psr_update(struct drm_device *dev)
                if (encoder->type == INTEL_OUTPUT_EDP) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
 
-                       if (!is_edp_psr(dev))
-                               return;
-
                        if (!intel_edp_psr_match_conditions(intel_dp))
                                intel_edp_psr_disable(intel_dp);
                        else
-                               if (!intel_edp_is_psr_enabled(dev))
-                                       intel_edp_psr_do_enable(intel_dp);
+                               intel_edp_psr_do_enable(intel_dp);
                }
 }
 
+static void intel_edp_psr_inactivate(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       dev_priv->psr.active = false;
+
+       I915_WRITE(EDP_PSR_CTL(dev), I915_READ(EDP_PSR_CTL(dev))
+                  & ~EDP_PSR_ENABLE);
+}
+
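+/*
+ * Temporarily deactivate PSR and (re)arm the delayed work that re-evaluates
+ * the PSR conditions roughly 100ms later, so that PSR gets out of the way of
+ * pending screen updates.
+ */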
+void intel_edp_psr_exit(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!HAS_PSR(dev))
+               return;
+
+       if (!dev_priv->psr.setup_done)
+               return;
+
+       cancel_delayed_work_sync(&dev_priv->psr.work);
+
+       if (dev_priv->psr.active)
+               intel_edp_psr_inactivate(dev);
+
+       schedule_delayed_work(&dev_priv->psr.work,
+                             msecs_to_jiffies(100));
+}
+
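+/* One-time setup of the PSR delayed work; called from intel_setup_outputs(). */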
+void intel_edp_psr_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!HAS_PSR(dev))
+               return;
+
+       INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
+}
+
 static void intel_disable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2119,6 +2186,70 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
        vlv_wait_port_ready(dev_priv, dport);
 }
 
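+/*
+ * Cherryview: before the port PLL is enabled, route the display PHY's
+ * left/right clock buffers and select the clock channel used by the PCS and
+ * common lanes according to the pipe driving this port.
+ */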
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(encoder->base.crtc);
+       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 val;
+
+       mutex_lock(&dev_priv->dpio_lock);
+
+       /* program left/right clock distribution */
+       if (pipe != PIPE_B) {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+               if (ch == DPIO_CH0)
+                       val |= CHV_BUFLEFTENA1_FORCE;
+               if (ch == DPIO_CH1)
+                       val |= CHV_BUFRIGHTENA1_FORCE;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+       } else {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+               if (ch == DPIO_CH0)
+                       val |= CHV_BUFLEFTENA2_FORCE;
+               if (ch == DPIO_CH1)
+                       val |= CHV_BUFRIGHTENA2_FORCE;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+       }
+
+       /* program clock channel usage */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+       if (pipe != PIPE_B)
+               val &= ~CHV_PCS_USEDCLKCHANNEL;
+       else
+               val |= CHV_PCS_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+       if (pipe != PIPE_B)
+               val &= ~CHV_PCS_USEDCLKCHANNEL;
+       else
+               val |= CHV_PCS_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+       /*
+        * This is a bit weird since generally CL
+        * matches the pipe, but here we need to
+        * pick the CL based on the port.
+        */
+       val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+       if (pipe != PIPE_B)
+               val &= ~CHV_CMN_USEDCLKCHANNEL;
+       else
+               val |= CHV_CMN_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+       mutex_unlock(&dev_priv->dpio_lock);
+}
+
 /*
  * Native read with retry for link status and receiver capability reads for
  * cases where the sink may still be asleep.
@@ -2156,18 +2287,14 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
                                       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 }
 
-/*
- * These are source-specific values; current Intel hardware supports
- * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
- */
-
+/* These are source-specific values. */
 static uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
-       if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
+       if (IS_VALLEYVIEW(dev))
                return DP_TRAIN_VOLTAGE_SWING_1200;
        else if (IS_GEN7(dev) && port == PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_800;
@@ -2183,18 +2310,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
-       if (IS_BROADWELL(dev)) {
-               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
-               case DP_TRAIN_VOLTAGE_SWING_600:
-                       return DP_TRAIN_PRE_EMPHASIS_6;
-               case DP_TRAIN_VOLTAGE_SWING_800:
-                       return DP_TRAIN_PRE_EMPHASIS_3_5;
-               case DP_TRAIN_VOLTAGE_SWING_1200:
-               default:
-                       return DP_TRAIN_PRE_EMPHASIS_0;
-               }
-       } else if (IS_HASWELL(dev)) {
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -2666,41 +2782,6 @@ intel_hsw_signal_levels(uint8_t train_set)
        }
 }
 
-static uint32_t
-intel_bdw_signal_levels(uint8_t train_set)
-{
-       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-                                        DP_TRAIN_PRE_EMPHASIS_MASK);
-       switch (signal_levels) {
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_400MV_0DB_BDW;       /* Sel0 */
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
-               return DDI_BUF_EMP_400MV_3_5DB_BDW;     /* Sel1 */
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
-               return DDI_BUF_EMP_400MV_6DB_BDW;       /* Sel2 */
-
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_600MV_0DB_BDW;       /* Sel3 */
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
-               return DDI_BUF_EMP_600MV_3_5DB_BDW;     /* Sel4 */
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
-               return DDI_BUF_EMP_600MV_6DB_BDW;       /* Sel5 */
-
-       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_800MV_0DB_BDW;       /* Sel6 */
-       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
-               return DDI_BUF_EMP_800MV_3_5DB_BDW;     /* Sel7 */
-
-       case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_1200MV_0DB_BDW;      /* Sel8 */
-
-       default:
-               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
-                             "0x%x\n", signal_levels);
-               return DDI_BUF_EMP_400MV_0DB_BDW;       /* Sel0 */
-       }
-}
-
 /* Properly updates "DP" with the correct signal levels. */
 static void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2711,10 +2792,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
        uint32_t signal_levels, mask;
        uint8_t train_set = intel_dp->train_set[0];
 
-       if (IS_BROADWELL(dev)) {
-               signal_levels = intel_bdw_signal_levels(train_set);
-               mask = DDI_BUF_EMP_MASK;
-       } else if (IS_HASWELL(dev)) {
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                signal_levels = intel_hsw_signal_levels(train_set);
                mask = DDI_BUF_EMP_MASK;
        } else if (IS_CHERRYVIEW(dev)) {
@@ -4279,8 +4357,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 
        intel_dp_aux_init(intel_dp, intel_connector);
 
-       intel_dp->psr_setup_done = false;
-
        if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
                drm_dp_aux_unregister(&intel_dp->aux);
                if (is_edp(intel_dp)) {
@@ -4337,6 +4413,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
        if (IS_CHERRYVIEW(dev)) {
+               intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->post_disable = chv_post_disable_dp;
index eaa27ee9e3675606a0e2ef17f7f1134060394c68..5f7c7bd94d909fd8d6fbf8e5d8e7b3a8435c7023 100644 (file)
@@ -358,6 +358,11 @@ struct intel_pipe_wm {
        bool sprites_scaled;
 };
 
+struct intel_mmio_flip {
+       u32 seqno;
+       u32 ring_id;
+};
+
 struct intel_crtc {
        struct drm_crtc base;
        enum pipe pipe;
@@ -384,7 +389,6 @@ struct intel_crtc {
 
        struct drm_i915_gem_object *cursor_bo;
        uint32_t cursor_addr;
-       int16_t cursor_x, cursor_y;
        int16_t cursor_width, cursor_height;
        uint32_t cursor_cntl;
        uint32_t cursor_base;
@@ -412,6 +416,7 @@ struct intel_crtc {
        wait_queue_head_t vbl_wait;
 
        int scanline_offset;
+       struct intel_mmio_flip mmio_flip;
 };
 
 struct intel_plane_wm_parameters {
@@ -428,7 +433,6 @@ struct intel_plane {
        struct drm_i915_gem_object *obj;
        bool can_scale;
        int max_downscale;
-       u32 lut_r[1024], lut_g[1024], lut_b[1024];
        int crtc_x, crtc_y;
        unsigned int crtc_w, crtc_h;
        uint32_t src_x, src_y;
@@ -537,7 +541,6 @@ struct intel_dp {
        unsigned long last_power_cycle;
        unsigned long last_power_on;
        unsigned long last_backlight_off;
-       bool psr_setup_done;
        bool use_tps3;
        struct intel_connector *attached_connector;
 
@@ -721,8 +724,33 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev);
 int intel_pch_rawclk(struct drm_device *dev);
 int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
 void intel_mark_busy(struct drm_device *dev);
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-                       struct intel_engine_cs *ring);
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                            struct intel_engine_cs *ring);
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+                                   unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+                                    unsigned frontbuffer_bits);
+void intel_frontbuffer_flush(struct drm_device *dev,
+                            unsigned frontbuffer_bits);
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given frontbuffer
+ * planes. This is for synchronous plane updates which will happen on the next
+ * vblank and which will not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+static inline
+void intel_frontbuffer_flip(struct drm_device *dev,
+                           unsigned frontbuffer_bits)
+{
+       intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 void intel_crtc_update_dpms(struct drm_crtc *crtc);
@@ -831,11 +859,13 @@ void intel_edp_panel_on(struct intel_dp *intel_dp);
 void intel_edp_panel_off(struct intel_dp *intel_dp);
 void intel_edp_psr_enable(struct intel_dp *intel_dp);
 void intel_edp_psr_disable(struct intel_dp *intel_dp);
-void intel_edp_psr_update(struct drm_device *dev);
 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
+void intel_edp_psr_exit(struct drm_device *dev);
+void intel_edp_psr_init(struct drm_device *dev);
+
 
 /* intel_dsi.c */
-bool intel_dsi_init(struct drm_device *dev);
+void intel_dsi_init(struct drm_device *dev);
 
 
 /* intel_dvo.c */
@@ -961,6 +991,7 @@ void intel_init_gt_powersave(struct drm_device *dev);
 void intel_cleanup_gt_powersave(struct drm_device *dev);
 void intel_enable_gt_powersave(struct drm_device *dev);
 void intel_disable_gt_powersave(struct drm_device *dev);
+void intel_suspend_gt_powersave(struct drm_device *dev);
 void intel_reset_gt_powersave(struct drm_device *dev);
 void ironlake_teardown_rc6(struct drm_device *dev);
 void gen6_update_ring_freq(struct drm_device *dev);
index 7c07ee07a8ee69f8f4e3636da4ca43986cbf9849..2ee1722c0af469c62bb461fd1dab17ac3705bb95 100644 (file)
@@ -657,7 +657,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
 };
 
-bool intel_dsi_init(struct drm_device *dev)
+void intel_dsi_init(struct drm_device *dev)
 {
        struct intel_dsi *intel_dsi;
        struct intel_encoder *intel_encoder;
@@ -673,29 +673,29 @@ bool intel_dsi_init(struct drm_device *dev)
 
        /* There is no detection method for MIPI so rely on VBT */
        if (!dev_priv->vbt.has_mipi)
-               return false;
+               return;
+
+       if (IS_VALLEYVIEW(dev)) {
+               dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+       } else {
+               DRM_ERROR("Unsupported Mipi device to reg base");
+               return;
+       }
 
        intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
        if (!intel_dsi)
-               return false;
+               return;
 
        intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
        if (!intel_connector) {
                kfree(intel_dsi);
-               return false;
+               return;
        }
 
        intel_encoder = &intel_dsi->base;
        encoder = &intel_encoder->base;
        intel_dsi->attached_connector = intel_connector;
 
-       if (IS_VALLEYVIEW(dev)) {
-               dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
-       } else {
-               DRM_ERROR("Unsupported Mipi device to reg base");
-               return false;
-       }
-
        connector = &intel_connector->base;
 
        drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -753,12 +753,10 @@ bool intel_dsi_init(struct drm_device *dev)
        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
        intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
 
-       return true;
+       return;
 
 err:
        drm_encoder_cleanup(&intel_encoder->base);
        kfree(intel_dsi);
        kfree(intel_connector);
-
-       return false;
 }
index 21a0d348cedc2a716e79105557215a1736d2ac76..47c7584a4aa0336938bb1150cc141e0e1063bfee 100644 (file)
@@ -143,7 +143,7 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
        case MIPI_DSI_DCS_LONG_WRITE:
                dsi_vc_dcs_write(intel_dsi, vc, data, len);
                break;
-       };
+       }
 
        data += len;
 
@@ -294,7 +294,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
        intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
        intel_dsi->init_count = mipi_config->master_init_timer;
        intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
-       intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+       intel_dsi->video_frmt_cfg_bits =
+               mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
 
        switch (intel_dsi->escape_clk_div) {
        case 0:
@@ -351,7 +352,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
         *
         * prepare count
         */
-       ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare);
+       ths_prepare_ns = max(mipi_config->ths_prepare,
+                            mipi_config->tclk_prepare);
        prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
 
        /* exit zero count */
index 3fb71a04e14f555761e77b4266990f866d182c00..56b47d2ffaf7ea0227fced60ee4ae8979e9803ec 100644 (file)
@@ -112,7 +112,15 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
 
 static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 {
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+       u32 tmp;
+
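+       /* If the DVO port itself is off, the attached encoder can't be active. */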
+       tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+       if (!(tmp & DVO_ENABLE))
+               return false;
 
        return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
 }
index 347d16220cd053dae98cca1b899525fe955b8dc1..44e17fd781b8c8d73f8dc4f9373295d13680cfe5 100644 (file)
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+static int intel_fbdev_set_par(struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct intel_fbdev *ifbdev =
+               container_of(fb_helper, struct intel_fbdev, helper);
+       int ret;
+
+       ret = drm_fb_helper_set_par(info);
+
+       if (ret == 0) {
+               /*
+                * FIXME: fbdev presumes that all callbacks also work from
+                * atomic contexts and relies on that for emergency oops
+                * printing. KMS totally doesn't do that and the locking here is
+                * by far not the only place this goes wrong.  Ignore this for
+                * now until we solve this for real.
+                */
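+               /*
+                * fbcon renders through a CPU mapping of the GTT aperture, so
+                * move the fb back into the GTT write domain after the mode
+                * switch.
+                */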
+               mutex_lock(&fb_helper->dev->struct_mutex);
+               ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
+                                                       true);
+               mutex_unlock(&fb_helper->dev->struct_mutex);
+       }
+
+       return ret;
+}
+
 static struct fb_ops intelfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
-       .fb_set_par = drm_fb_helper_set_par,
+       .fb_set_par = intel_fbdev_set_par,
        .fb_fillrect = cfb_fillrect,
        .fb_copyarea = cfb_copyarea,
        .fb_imageblit = cfb_imageblit,
@@ -417,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                }
                crtcs[i] = new_crtc;
 
-               DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
+               DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
                              connector->name,
                              pipe_name(to_intel_crtc(encoder->crtc)->pipe),
                              encoder->crtc->base.id,
index 0b603102cb3be10a3917b0316db0f9699f9e57d7..24224131ebf16d44ef6d6dacdf4a5a95ac298b8a 100644 (file)
@@ -1229,6 +1229,70 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
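+/* Same CHV DPIO clock routing as the DP pre_pll_enable hook, here for HDMI. */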
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(encoder->base.crtc);
+       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 val;
+
+       mutex_lock(&dev_priv->dpio_lock);
+
+       /* program left/right clock distribution */
+       if (pipe != PIPE_B) {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+               if (ch == DPIO_CH0)
+                       val |= CHV_BUFLEFTENA1_FORCE;
+               if (ch == DPIO_CH1)
+                       val |= CHV_BUFRIGHTENA1_FORCE;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+       } else {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+               if (ch == DPIO_CH0)
+                       val |= CHV_BUFLEFTENA2_FORCE;
+               if (ch == DPIO_CH1)
+                       val |= CHV_BUFRIGHTENA2_FORCE;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+       }
+
+       /* program clock channel usage */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+       if (pipe != PIPE_B)
+               val &= ~CHV_PCS_USEDCLKCHANNEL;
+       else
+               val |= CHV_PCS_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+       if (pipe != PIPE_B)
+               val &= ~CHV_PCS_USEDCLKCHANNEL;
+       else
+               val |= CHV_PCS_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+       /*
+        * This is a bit weird since generally CL
+        * matches the pipe, but here we need to
+        * pick the CL based on the port.
+        */
+       val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+       if (pipe != PIPE_B)
+               val &= ~CHV_CMN_USEDCLKCHANNEL;
+       else
+               val |= CHV_CMN_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+       mutex_unlock(&dev_priv->dpio_lock);
+}
+
 static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1528,6 +1592,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
        intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
        intel_encoder->get_config = intel_hdmi_get_config;
        if (IS_CHERRYVIEW(dev)) {
+               intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
                intel_encoder->pre_enable = chv_hdmi_pre_enable;
                intel_encoder->enable = vlv_enable_hdmi;
                intel_encoder->post_disable = chv_hdmi_post_disable;
index daa118978eec725b471d1c3dbb3933f52f85c1b5..307c2f1842b77c537950743fcafa32629689a73a 100644 (file)
@@ -415,6 +415,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
        }
 
        intel_overlay_release_old_vid_tail(overlay);
+
+       i915_gem_track_fb(overlay->old_vid_bo, NULL,
+                         INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
        return 0;
 }
 
@@ -686,6 +690,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        bool scale_changed = false;
        struct drm_device *dev = overlay->dev;
        u32 swidth, swidthsw, sheight, ostride;
+       enum pipe pipe = overlay->crtc->pipe;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -713,7 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                oconfig = OCONF_CC_OUT_8BIT;
                if (IS_GEN4(overlay->dev))
                        oconfig |= OCONF_CSC_MODE_BT709;
-               oconfig |= overlay->crtc->pipe == 0 ?
+               oconfig |= pipe == 0 ?
                        OCONF_PIPE_A : OCONF_PIPE_B;
                iowrite32(oconfig, &regs->OCONFIG);
                intel_overlay_unmap_regs(overlay, regs);
@@ -776,9 +781,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        if (ret)
                goto out_unpin;
 
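+       /* Move the overlay frontbuffer bit from the old buffer to the new one. */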
+       i915_gem_track_fb(overlay->vid_bo, new_bo,
+                         INTEL_FRONTBUFFER_OVERLAY(pipe));
+
        overlay->old_vid_bo = overlay->vid_bo;
        overlay->vid_bo = new_bo;
 
+       intel_frontbuffer_flip(dev,
+                              INTEL_FRONTBUFFER_OVERLAY(pipe));
+
        return 0;
 
 out_unpin:
index ee72807069e4ad54a4b8ac2b4d8e601d6127abaa..b6e09f226230ddbe6c8446b7f562197d52d39496 100644 (file)
@@ -529,7 +529,10 @@ void intel_update_fbc(struct drm_device *dev)
                goto out_disable;
        }
 
-       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+       if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+               max_width = 4096;
+               max_height = 4096;
+       } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                max_width = 4096;
                max_height = 2048;
        } else {
@@ -864,95 +867,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
 
 /* Pineview has different values for various configs */
 static const struct intel_watermark_params pineview_display_wm = {
-       PINEVIEW_DISPLAY_FIFO,
-       PINEVIEW_MAX_WM,
-       PINEVIEW_DFT_WM,
-       PINEVIEW_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE
+       .fifo_size = PINEVIEW_DISPLAY_FIFO,
+       .max_wm = PINEVIEW_MAX_WM,
+       .default_wm = PINEVIEW_DFT_WM,
+       .guard_size = PINEVIEW_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_display_hplloff_wm = {
-       PINEVIEW_DISPLAY_FIFO,
-       PINEVIEW_MAX_WM,
-       PINEVIEW_DFT_HPLLOFF_WM,
-       PINEVIEW_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE
+       .fifo_size = PINEVIEW_DISPLAY_FIFO,
+       .max_wm = PINEVIEW_MAX_WM,
+       .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+       .guard_size = PINEVIEW_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_cursor_wm = {
-       PINEVIEW_CURSOR_FIFO,
-       PINEVIEW_CURSOR_MAX_WM,
-       PINEVIEW_CURSOR_DFT_WM,
-       PINEVIEW_CURSOR_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE,
+       .fifo_size = PINEVIEW_CURSOR_FIFO,
+       .max_wm = PINEVIEW_CURSOR_MAX_WM,
+       .default_wm = PINEVIEW_CURSOR_DFT_WM,
+       .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
-       PINEVIEW_CURSOR_FIFO,
-       PINEVIEW_CURSOR_MAX_WM,
-       PINEVIEW_CURSOR_DFT_WM,
-       PINEVIEW_CURSOR_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE
+       .fifo_size = PINEVIEW_CURSOR_FIFO,
+       .max_wm = PINEVIEW_CURSOR_MAX_WM,
+       .default_wm = PINEVIEW_CURSOR_DFT_WM,
+       .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params g4x_wm_info = {
-       G4X_FIFO_SIZE,
-       G4X_MAX_WM,
-       G4X_MAX_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = G4X_FIFO_SIZE,
+       .max_wm = G4X_MAX_WM,
+       .default_wm = G4X_MAX_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params g4x_cursor_wm_info = {
-       I965_CURSOR_FIFO,
-       I965_CURSOR_MAX_WM,
-       I965_CURSOR_DFT_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = I965_CURSOR_FIFO,
+       .max_wm = I965_CURSOR_MAX_WM,
+       .default_wm = I965_CURSOR_DFT_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params valleyview_wm_info = {
-       VALLEYVIEW_FIFO_SIZE,
-       VALLEYVIEW_MAX_WM,
-       VALLEYVIEW_MAX_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = VALLEYVIEW_FIFO_SIZE,
+       .max_wm = VALLEYVIEW_MAX_WM,
+       .default_wm = VALLEYVIEW_MAX_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params valleyview_cursor_wm_info = {
-       I965_CURSOR_FIFO,
-       VALLEYVIEW_CURSOR_MAX_WM,
-       I965_CURSOR_DFT_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = I965_CURSOR_FIFO,
+       .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
+       .default_wm = I965_CURSOR_DFT_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i965_cursor_wm_info = {
-       I965_CURSOR_FIFO,
-       I965_CURSOR_MAX_WM,
-       I965_CURSOR_DFT_WM,
-       2,
-       I915_FIFO_LINE_SIZE,
+       .fifo_size = I965_CURSOR_FIFO,
+       .max_wm = I965_CURSOR_MAX_WM,
+       .default_wm = I965_CURSOR_DFT_WM,
+       .guard_size = 2,
+       .cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i945_wm_info = {
-       I945_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I915_FIFO_LINE_SIZE
+       .fifo_size = I945_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i915_wm_info = {
-       I915_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I915_FIFO_LINE_SIZE
+       .fifo_size = I915_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i830_wm_info = {
-       I855GM_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I830_FIFO_LINE_SIZE
+       .fifo_size = I855GM_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I830_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i845_wm_info = {
-       I830_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I830_FIFO_LINE_SIZE
+       .fifo_size = I830_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I830_FIFO_LINE_SIZE,
 };
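The watermark tables above now use C99 designated initializers instead of positional ones; a minimal standalone sketch (struct and values invented here, not the driver's definitions) of why named fields are more robust:

#include <stdio.h>

/* Stand-in for a watermark parameter block; fields and values are
 * invented for illustration and are not the driver's definitions. */
struct wm_params {
        unsigned int fifo_size;
        unsigned int max_wm;
        unsigned int default_wm;
        unsigned int guard_size;
        unsigned int cacheline_size;
};

/* With designated initializers the table stays correct even if the
 * struct gains fields or its members are reordered; anything left
 * unnamed is zero-initialized. */
static const struct wm_params example_wm = {
        .fifo_size      = 96,
        .max_wm         = 0x3f,
        .default_wm     = 1,
        .guard_size     = 2,
        .cacheline_size = 64,
};

int main(void)
{
        printf("fifo=%u cacheline=%u\n", example_wm.fifo_size,
               example_wm.cacheline_size);
        return 0;
}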
 
 /**
@@ -3348,6 +3351,13 @@ static void gen6_disable_rps(struct drm_device *dev)
                gen6_disable_rps_interrupts(dev);
 }
 
+static void cherryview_disable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+}
+
 static void valleyview_disable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3727,6 +3737,35 @@ void gen6_update_ring_freq(struct drm_device *dev)
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
+int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+       u32 val, rp0;
+
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+       rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+       return rp0;
+}
+
+static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+       u32 val, rpe;
+
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+       rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+
+       return rpe;
+}
+
+int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+       u32 val, rpn;
+
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+       rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+       return rpn;
+}
+
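The new cherryview_rps_*_freq() helpers above read RP0/RPe/RPn by shifting and masking fields out of a Punit status word; a standalone sketch of that extraction pattern (shift and mask values invented here, not the real register layout):

#include <stdint.h>
#include <stdio.h>

/* Invented field layout for illustration; the real PUNIT shifts and
 * masks live in i915_reg.h. */
#define EXAMPLE_MAX_FREQ_SHIFT  16
#define EXAMPLE_MAX_FREQ_MASK   0xff

static unsigned int extract_field(uint32_t val, unsigned int shift,
                                  uint32_t mask)
{
        return (val >> shift) & mask;
}

int main(void)
{
        uint32_t status = 0x00c8d2a4;   /* pretend a punit read returned this */
        unsigned int rp0 = extract_field(status, EXAMPLE_MAX_FREQ_SHIFT,
                                         EXAMPLE_MAX_FREQ_MASK);

        printf("max freq code: %u\n", rp0);     /* prints 200 (0xc8) */
        return 0;
}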
 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
        u32 val, rp0;
@@ -3766,6 +3805,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
                             dev_priv->vlv_pctx->stolen->start);
 }
 
+
+/* Check that the pcbr address is not empty. */
+static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+{
+       unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+       WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+}
+
+static void cherryview_setup_pctx(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long pctx_paddr, paddr;
+       struct i915_gtt *gtt = &dev_priv->gtt;
+       u32 pcbr;
+       int pctx_size = 32*1024;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       pcbr = I915_READ(VLV_PCBR);
+       if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+               paddr = (dev_priv->mm.stolen_base +
+                        (gtt->stolen_size - pctx_size));
+
+               pctx_paddr = (paddr & (~4095));
+               I915_WRITE(VLV_PCBR, pctx_paddr);
+       }
+}
+
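cherryview_setup_pctx() above places the 32 KiB power context at the top of stolen memory and writes a 4 KiB-aligned address into PCBR; the arithmetic, worked through with invented base and size values:

#include <stdio.h>

int main(void)
{
        /* Example numbers only; the driver gets these from mm.stolen_base
         * and gtt.stolen_size at runtime. */
        unsigned long stolen_base = 0x7b800000UL;
        unsigned long stolen_size = 64UL * 1024 * 1024; /* 64 MiB of stolen memory */
        unsigned long pctx_size   = 32UL * 1024;        /* 32 KiB power context */

        /* Carve the context out of the very top of stolen memory... */
        unsigned long paddr = stolen_base + (stolen_size - pctx_size);
        /* ...then clear the low 12 bits, matching "& (~4095)" in the patch,
         * so the address written to PCBR is 4 KiB aligned. */
        unsigned long pctx_paddr = paddr & ~4095UL;

        printf("pctx_paddr = 0x%lx\n", pctx_paddr);
        return 0;
}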
 static void valleyview_setup_pctx(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3855,11 +3923,135 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
+static void cherryview_init_gt_powersave(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       cherryview_setup_pctx(dev);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+       DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+                        dev_priv->rps.max_freq);
+
+       dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+       DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
+
+       dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+       DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+                        dev_priv->rps.min_freq);
+
+       /* Preserve min/max settings in case of re-init */
+       if (dev_priv->rps.max_freq_softlimit == 0)
+               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+       if (dev_priv->rps.min_freq_softlimit == 0)
+               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
 {
        valleyview_cleanup_pctx(dev);
 }
 
+static void cherryview_enable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring;
+       u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+       int i;
+
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+       gtfifodbg = I915_READ(GTFIFODBG);
+       if (gtfifodbg) {
+               DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+                                gtfifodbg);
+               I915_WRITE(GTFIFODBG, gtfifodbg);
+       }
+
+       cherryview_check_pctx(dev_priv);
+
+       /* 1a & 1b: Get forcewake during program sequence. Although the driver
+        * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+       /* 2a: Program RC6 thresholds.*/
+       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+       I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+       I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+       for_each_ring(ring, dev_priv, i)
+               I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+       I915_WRITE(GEN6_RC_SLEEP, 0);
+
+       I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+       /* allows RC6 residency counter to work */
+       I915_WRITE(VLV_COUNTER_CONTROL,
+                  _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+                                     VLV_MEDIA_RC6_COUNT_EN |
+                                     VLV_RENDER_RC6_COUNT_EN));
+
+       /* For now we assume BIOS is allocating and populating the PCBR  */
+       pcbr = I915_READ(VLV_PCBR);
+
+       DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
+
+       /* 3: Enable RC6 */
+       if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+                                               (pcbr >> VLV_PCBR_ADDR_SHIFT))
+               rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+       I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+       /* 4 Program defaults and thresholds for RPS*/
+       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+       I915_WRITE(GEN6_RP_UP_EI, 66000);
+       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+       /* WaDisablePwrmtrEvent:chv (pre-production hw) */
+       I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+       I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
+       /* 5: Enable RPS */
+       I915_WRITE(GEN6_RP_CONTROL,
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                  GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+                  GEN6_RP_ENABLE |
+                  GEN6_RP_UP_BUSY_AVG |
+                  GEN6_RP_DOWN_IDLE_AVG);
+
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+       DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+       DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+                        dev_priv->rps.cur_freq);
+
+       DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
+
+       valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
 static void valleyview_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4668,33 +4860,57 @@ void intel_init_gt_powersave(struct drm_device *dev)
 {
        i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
 
-       if (IS_VALLEYVIEW(dev))
+       if (IS_CHERRYVIEW(dev))
+               cherryview_init_gt_powersave(dev);
+       else if (IS_VALLEYVIEW(dev))
                valleyview_init_gt_powersave(dev);
 }
 
 void intel_cleanup_gt_powersave(struct drm_device *dev)
 {
-       if (IS_VALLEYVIEW(dev))
+       if (IS_CHERRYVIEW(dev))
+               return;
+       else if (IS_VALLEYVIEW(dev))
                valleyview_cleanup_gt_powersave(dev);
 }
 
+/**
+ * intel_suspend_gt_powersave - suspend PM work and helper threads
+ * @dev: drm device
+ *
+ * We don't want to disable RC6 or other features here, we just want
+ * to make sure any work we've queued has finished and won't bother
+ * us while we're suspended.
+ */
+void intel_suspend_gt_powersave(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Interrupts should be disabled already to avoid re-arming. */
+       WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled);
+
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+       cancel_work_sync(&dev_priv->rps.work);
+}
+
 void intel_disable_gt_powersave(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        /* Interrupts should be disabled already to avoid re-arming. */
-       WARN_ON(dev->irq_enabled);
+       WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled);
 
        if (IS_IRONLAKE_M(dev)) {
                ironlake_disable_drps(dev);
                ironlake_disable_rc6(dev);
-       } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
-               if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
-                       intel_runtime_pm_put(dev_priv);
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               intel_suspend_gt_powersave(dev);
 
-               cancel_work_sync(&dev_priv->rps.work);
                mutex_lock(&dev_priv->rps.hw_lock);
-               if (IS_VALLEYVIEW(dev))
+               if (IS_CHERRYVIEW(dev))
+                       cherryview_disable_rps(dev);
+               else if (IS_VALLEYVIEW(dev))
                        valleyview_disable_rps(dev);
                else
                        gen6_disable_rps(dev);
@@ -4712,7 +4928,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 
        mutex_lock(&dev_priv->rps.hw_lock);
 
-       if (IS_VALLEYVIEW(dev)) {
+       if (IS_CHERRYVIEW(dev)) {
+               cherryview_enable_rps(dev);
+       } else if (IS_VALLEYVIEW(dev)) {
                valleyview_enable_rps(dev);
        } else if (IS_BROADWELL(dev)) {
                gen8_enable_rps(dev);
@@ -4737,7 +4955,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
                ironlake_enable_rc6(dev);
                intel_init_emon(dev);
                mutex_unlock(&dev->struct_mutex);
-       } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+       } else if (INTEL_INFO(dev)->gen >= 6) {
                /*
                 * PCU communication is slow and this doesn't need to be
                 * done at any specific time, so do this out of our fast path
index a5e783a9928a40fff80297cf71725151fbdae30f..fd4f66231d30edec7d96236fac145e2b00b7b0c5 100644
@@ -28,7 +28,6 @@
 
 struct intel_renderstate_rodata {
        const u32 *reloc;
-       const u32 reloc_items;
        const u32 *batch;
        const u32 batch_items;
 };
@@ -40,7 +39,6 @@ extern const struct intel_renderstate_rodata gen8_null_state;
 #define RO_RENDERSTATE(_g)                                             \
        const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
                .reloc = gen ## _g ## _null_state_relocs,               \
-               .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
                .batch = gen ## _g ## _null_state_batch,                \
                .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
        }
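reloc_items can go away because the reloc tables (see the gen6/7/8 hunks below) now end with a -1 sentinel instead of carrying a separate count; a standalone sketch of walking such a table (the offsets below are illustrative):

#include <stdio.h>

/* Illustrative reloc offsets, terminated by -1 like the new tables. */
static const unsigned int example_relocs[] = {
        0x0000002c,
        0x000001e0,
        0x000001e4,
        -1,             /* sentinel: end of table */
};

int main(void)
{
        /* No element count needed; stop when the sentinel is reached. */
        for (int i = 0; example_relocs[i] != (unsigned int)-1; i++)
                printf("reloc[%d] = 0x%08x\n", i, example_relocs[i]);
        return 0;
}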
index 740538ad09771638b6a184331a0c357cdaebc8ec..56c1429d8a60d2dadaaf3b95c2e7d74b13fc1cb4 100644
@@ -6,6 +6,7 @@ static const u32 gen6_null_state_relocs[] = {
        0x0000002c,
        0x000001e0,
        0x000001e4,
+       -1,
 };
 
 static const u32 gen6_null_state_batch[] = {
index 6fa7ff2a12983d847c17380f55c22bb9fb87d477..419e35a7b0ff391289c1dff980a90ea7468b7010 100644
@@ -5,6 +5,7 @@ static const u32 gen7_null_state_relocs[] = {
        0x00000010,
        0x00000018,
        0x000001ec,
+       -1,
 };
 
 static const u32 gen7_null_state_batch[] = {
index 5c875615d42acc58deba2bf75d02a92bff8292ef..75ef1b5de45c12c33337b00db36b30d199d9eb20 100644
@@ -5,6 +5,7 @@ static const u32 gen8_null_state_relocs[] = {
        0x00000050,
        0x00000060,
        0x000003ec,
+       -1,
 };
 
 static const u32 gen8_null_state_batch[] = {
index 279488addf3f6bd7afb194bf29479a9228390460..2faef2605e97e399beb81c1dd4dfeebed01dea16 100644
@@ -604,6 +604,8 @@ static int init_render_ring(struct intel_engine_cs *ring)
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = init_ring_common(ring);
+       if (ret)
+               return ret;
 
        /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
        if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
@@ -1397,6 +1399,9 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
        if (obj == NULL)
                return -ENOMEM;
 
+       /* mark ring buffers as read-only from GPU side by default */
+       obj->gt_ro = 1;
+
        ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
        if (ret)
                goto err_unref;
@@ -1746,14 +1751,15 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 
 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        BUG_ON(ring->outstanding_lazy_seqno);
 
-       if (INTEL_INFO(ring->dev)->gen >= 6) {
+       if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
                I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
-               if (HAS_VEBOX(ring->dev))
+               if (HAS_VEBOX(dev))
                        I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
        }
 
index 9a17b4e92ef4f8ad3eb1e8a2a61293e6f1334175..985317eb1dc9b9044eb66127ebd0ac6d582ef45c 100644
@@ -819,6 +819,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        struct drm_device *dev = plane->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_plane *intel_plane = to_intel_plane(plane);
+       enum pipe pipe = intel_crtc->pipe;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct drm_i915_gem_object *old_obj = intel_plane->obj;
@@ -1006,6 +1007,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
         */
        ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 
+       i915_gem_track_fb(old_obj, obj,
+                         INTEL_FRONTBUFFER_SPRITE(pipe));
        mutex_unlock(&dev->struct_mutex);
 
        if (ret)
@@ -1039,6 +1042,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                else
                        intel_plane->disable_plane(plane, crtc);
 
+               intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
+
                if (!primary_was_enabled && primary_enabled)
                        intel_post_enable_primary(crtc);
        }
@@ -1068,6 +1073,7 @@ intel_disable_plane(struct drm_plane *plane)
        struct drm_device *dev = plane->dev;
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct intel_crtc *intel_crtc;
+       enum pipe pipe;
 
        if (!plane->fb)
                return 0;
@@ -1076,6 +1082,7 @@ intel_disable_plane(struct drm_plane *plane)
                return -EINVAL;
 
        intel_crtc = to_intel_crtc(plane->crtc);
+       pipe = intel_crtc->pipe;
 
        if (intel_crtc->active) {
                bool primary_was_enabled = intel_crtc->primary_enabled;
@@ -1094,6 +1101,8 @@ intel_disable_plane(struct drm_plane *plane)
 
                mutex_lock(&dev->struct_mutex);
                intel_unpin_fb_obj(intel_plane->obj);
+               i915_gem_track_fb(intel_plane->obj, NULL,
+                                 INTEL_FRONTBUFFER_SPRITE(pipe));
                mutex_unlock(&dev->struct_mutex);
 
                intel_plane->obj = NULL;
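The sprite hunks above hand frontbuffer ownership from the old object to the new one via i915_gem_track_fb() using a per-pipe sprite bit; a standalone sketch of that style of bitmask bookkeeping (bit layout invented purely for illustration):

#include <stdio.h>

/* Invented bit layout: one sprite bit per pipe, for illustration only. */
#define EXAMPLE_FRONTBUFFER_SPRITE(pipe)        (1u << (pipe))

struct fb_obj {
        unsigned int frontbuffer_bits;
};

/* Move a frontbuffer bit from the object a plane used to scan out
 * to the one it scans out now; either side may be NULL. */
static void track_fb(struct fb_obj *old_obj, struct fb_obj *new_obj,
                     unsigned int bit)
{
        if (old_obj)
                old_obj->frontbuffer_bits &= ~bit;
        if (new_obj)
                new_obj->frontbuffer_bits |= bit;
}

int main(void)
{
        struct fb_obj a = { 0 }, b = { 0 };
        unsigned int bit = EXAMPLE_FRONTBUFFER_SPRITE(1);       /* sprite on pipe B */

        track_fb(NULL, &a, bit);        /* plane starts scanning out a */
        track_fb(&a, &b, bit);          /* plane flips to b */
        printf("a=0x%x b=0x%x\n", a.frontbuffer_bits, b.frontbuffer_bits);
        return 0;
}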
index 4f6fef7ac0699049f74abcd5edc6177ff5749f54..29145df8ef6477fba5aaf2e57c163339b8ee8829 100644
@@ -231,8 +231,8 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
        }
 
        /* WaRsForcewakeWaitTC0:vlv */
-       __gen6_gt_wait_for_thread_c0(dev_priv);
-
+       if (!IS_CHERRYVIEW(dev_priv->dev))
+               __gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
 static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -250,9 +250,10 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
                __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                                _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 
-       /* The below doubles as a POSTING_READ */
-       gen6_gt_check_fifodbg(dev_priv);
-
+       /* something from same cacheline, but !FORCEWAKE_VLV */
+       __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+       if (!IS_CHERRYVIEW(dev_priv->dev))
+               gen6_gt_check_fifodbg(dev_priv);
 }
 
 static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
@@ -315,7 +316,7 @@ static void gen6_force_wake_timer(unsigned long arg)
        intel_runtime_pm_put(dev_priv);
 }
 
-static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -357,16 +358,12 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
                        dev_priv->uncore.fifo_count =
                                __raw_i915_read32(dev_priv, GTFIFOCTL) &
                                GT_FIFO_FREE_ENTRIES_MASK;
-       } else {
-               dev_priv->uncore.forcewake_count = 0;
-               dev_priv->uncore.fw_rendercount = 0;
-               dev_priv->uncore.fw_mediacount = 0;
        }
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-void intel_uncore_early_sanitize(struct drm_device *dev)
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -389,7 +386,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));
 
-       intel_uncore_forcewake_reset(dev, false);
+       intel_uncore_forcewake_reset(dev, restore_forcewake);
 }
 
 void intel_uncore_sanitize(struct drm_device *dev)
@@ -469,16 +466,43 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
         ((reg) < 0x40000 && (reg) != FORCEWAKE)
 
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
-       (((reg) >= 0x2000 && (reg) < 0x4000) ||\
-       ((reg) >= 0x5000 && (reg) < 0x8000) ||\
-       ((reg) >= 0xB000 && (reg) < 0x12000) ||\
-       ((reg) >= 0x2E000 && (reg) < 0x30000))
+#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
 
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
-       (((reg) >= 0x12000 && (reg) < 0x14000) ||\
-       ((reg) >= 0x22000 && (reg) < 0x24000) ||\
-       ((reg) >= 0x30000 && (reg) < 0x40000))
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+       (REG_RANGE((reg), 0x2000, 0x4000) || \
+        REG_RANGE((reg), 0x5000, 0x8000) || \
+        REG_RANGE((reg), 0xB000, 0x12000) || \
+        REG_RANGE((reg), 0x2E000, 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
+       (REG_RANGE((reg), 0x12000, 0x14000) || \
+        REG_RANGE((reg), 0x22000, 0x24000) || \
+        REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
+       (REG_RANGE((reg), 0x2000, 0x4000) || \
+        REG_RANGE((reg), 0x5000, 0x8000) || \
+        REG_RANGE((reg), 0x8300, 0x8500) || \
+        REG_RANGE((reg), 0xB000, 0xC000) || \
+        REG_RANGE((reg), 0xE000, 0xE800))
+
+#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
+       (REG_RANGE((reg), 0x8800, 0x8900) || \
+        REG_RANGE((reg), 0xD000, 0xD800) || \
+        REG_RANGE((reg), 0x12000, 0x14000) || \
+        REG_RANGE((reg), 0x1A000, 0x1C000) || \
+        REG_RANGE((reg), 0x1E800, 0x1EA00) || \
+        REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
+       (REG_RANGE((reg), 0x4000, 0x5000) || \
+        REG_RANGE((reg), 0x8000, 0x8300) || \
+        REG_RANGE((reg), 0x8500, 0x8600) || \
+        REG_RANGE((reg), 0x9000, 0xB000) || \
+        REG_RANGE((reg), 0xC000, 0xC800) || \
+        REG_RANGE((reg), 0xF000, 0x10000) || \
+        REG_RANGE((reg), 0x14000, 0x14400) || \
+        REG_RANGE((reg), 0x22000, 0x24000))
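REG_RANGE() and the CHV range tables above classify an MMIO offset into render, media, or common forcewake domains; a standalone sketch of the half-open range test (the two ranges are copied from the patch, the classifier itself is illustrative):

#include <stdio.h>

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

/* Two of the CHV render ranges from the patch, used only to show how an
 * offset is matched against the tables. */
static int is_example_render_reg(unsigned int reg)
{
        return REG_RANGE(reg, 0x2000, 0x4000) ||
               REG_RANGE(reg, 0x5000, 0x8000);
}

int main(void)
{
        printf("0x2140 -> %d\n", is_example_render_reg(0x2140));        /* 1 */
        printf("0x4000 -> %d\n", is_example_render_reg(0x4000));        /* 0: upper bound is exclusive */
        return 0;
}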
 
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
@@ -573,7 +597,35 @@ vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_FOOTER; \
 }
 
+#define __chv_read(x) \
+static u##x \
+chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+       unsigned fwengine = 0; \
+       REG_READ_HEADER(x); \
+       if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+               if (dev_priv->uncore.fw_rendercount == 0) \
+                       fwengine = FORCEWAKE_RENDER; \
+       } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+               if (dev_priv->uncore.fw_mediacount == 0) \
+                       fwengine = FORCEWAKE_MEDIA; \
+       } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+               if (dev_priv->uncore.fw_rendercount == 0) \
+                       fwengine |= FORCEWAKE_RENDER; \
+               if (dev_priv->uncore.fw_mediacount == 0) \
+                       fwengine |= FORCEWAKE_MEDIA; \
+       } \
+       if (fwengine) \
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+       val = __raw_i915_read##x(dev_priv, reg); \
+       if (fwengine) \
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+       REG_READ_FOOTER; \
+}
 
+__chv_read(8)
+__chv_read(16)
+__chv_read(32)
+__chv_read(64)
 __vlv_read(8)
 __vlv_read(16)
 __vlv_read(32)
@@ -591,6 +643,7 @@ __gen4_read(16)
 __gen4_read(32)
 __gen4_read(64)
 
+#undef __chv_read
 #undef __vlv_read
 #undef __gen6_read
 #undef __gen5_read
@@ -695,6 +748,38 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
        REG_WRITE_FOOTER; \
 }
 
+#define __chv_write(x) \
+static void \
+chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+       unsigned fwengine = 0; \
+       bool shadowed = is_gen8_shadowed(dev_priv, reg); \
+       REG_WRITE_HEADER; \
+       if (!shadowed) { \
+               if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+                       if (dev_priv->uncore.fw_rendercount == 0) \
+                               fwengine = FORCEWAKE_RENDER; \
+               } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+                       if (dev_priv->uncore.fw_mediacount == 0) \
+                               fwengine = FORCEWAKE_MEDIA; \
+               } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+                       if (dev_priv->uncore.fw_rendercount == 0) \
+                               fwengine |= FORCEWAKE_RENDER; \
+                       if (dev_priv->uncore.fw_mediacount == 0) \
+                               fwengine |= FORCEWAKE_MEDIA; \
+               } \
+       } \
+       if (fwengine) \
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+       __raw_i915_write##x(dev_priv, reg, val); \
+       if (fwengine) \
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+       REG_WRITE_FOOTER; \
+}
+
+__chv_write(8)
+__chv_write(16)
+__chv_write(32)
+__chv_write(64)
 __gen8_write(8)
 __gen8_write(16)
 __gen8_write(32)
@@ -716,6 +801,7 @@ __gen4_write(16)
 __gen4_write(32)
 __gen4_write(64)
 
+#undef __chv_write
 #undef __gen8_write
 #undef __hsw_write
 #undef __gen6_write
@@ -731,7 +817,7 @@ void intel_uncore_init(struct drm_device *dev)
        setup_timer(&dev_priv->uncore.force_wake_timer,
                    gen6_force_wake_timer, (unsigned long)dev_priv);
 
-       intel_uncore_early_sanitize(dev);
+       intel_uncore_early_sanitize(dev, false);
 
        if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
@@ -779,14 +865,26 @@ void intel_uncore_init(struct drm_device *dev)
 
        switch (INTEL_INFO(dev)->gen) {
        default:
-               dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
-               dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
-               dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
-               dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
-               dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
-               dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
-               dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
-               dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+               if (IS_CHERRYVIEW(dev)) {
+                       dev_priv->uncore.funcs.mmio_writeb  = chv_write8;
+                       dev_priv->uncore.funcs.mmio_writew  = chv_write16;
+                       dev_priv->uncore.funcs.mmio_writel  = chv_write32;
+                       dev_priv->uncore.funcs.mmio_writeq  = chv_write64;
+                       dev_priv->uncore.funcs.mmio_readb  = chv_read8;
+                       dev_priv->uncore.funcs.mmio_readw  = chv_read16;
+                       dev_priv->uncore.funcs.mmio_readl  = chv_read32;
+                       dev_priv->uncore.funcs.mmio_readq  = chv_read64;
+
+               } else {
+                       dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
+                       dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
+                       dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
+                       dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
+                       dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
+                       dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
+                       dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
+                       dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+               }
                break;
        case 7:
        case 6:
@@ -1053,18 +1151,16 @@ static int gen6_do_reset(struct drm_device *dev)
 
 int intel_gpu_reset(struct drm_device *dev)
 {
-       switch (INTEL_INFO(dev)->gen) {
-       case 8:
-       case 7:
-       case 6: return gen6_do_reset(dev);
-       case 5: return ironlake_do_reset(dev);
-       case 4:
-               if (IS_G4X(dev))
-                       return g4x_do_reset(dev);
-               else
-                       return i965_do_reset(dev);
-       default: return -ENODEV;
-       }
+       if (INTEL_INFO(dev)->gen >= 6)
+               return gen6_do_reset(dev);
+       else if (IS_GEN5(dev))
+               return ironlake_do_reset(dev);
+       else if (IS_G4X(dev))
+               return g4x_do_reset(dev);
+       else if (IS_GEN4(dev))
+               return i965_do_reset(dev);
+       else
+               return -ENODEV;
 }
 
 void intel_uncore_check_errors(struct drm_device *dev)
index 4da62072701c57ab4c3ee8d8df5984bd3844e1c7..e529b68d503778dedfd2ed09e14fd6b3387390c4 100644
@@ -331,6 +331,10 @@ struct drm_crtc {
        struct drm_plane *primary;
        struct drm_plane *cursor;
 
+       /* position of cursor plane on crtc */
+       int cursor_x;
+       int cursor_y;
+
        /* Temporary tracking of the old fb while a modeset is ongoing. Used
         * by drm_mode_set_config_internal to implement correct refcounting. */
        struct drm_framebuffer *old_fb;
@@ -858,7 +862,7 @@ struct drm_prop_enum_list {
 extern int drm_crtc_init_with_planes(struct drm_device *dev,
                                     struct drm_crtc *crtc,
                                     struct drm_plane *primary,
-                                    void *cursor,
+                                    struct drm_plane *cursor,
                                     const struct drm_crtc_funcs *funcs);
 extern int drm_crtc_init(struct drm_device *dev,
                         struct drm_crtc *crtc,