Merge tag 'v3.14-rc6' into drm-intel-next-queued
author     Daniel Vetter <daniel.vetter@ffwll.ch>
           Mon, 10 Mar 2014 20:43:46 +0000 (21:43 +0100)
committer  Daniel Vetter <daniel.vetter@ffwll.ch>
           Mon, 10 Mar 2014 20:43:46 +0000 (21:43 +0100)
Linux 3.14-rc6

I need the hdmi/dvi dual-link fixes in 3.14 to avoid ugly conflicts
when merging Ville's new hdmi cloning support into my -next tree.

Conflicts:
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/intel_dp.c

The Makefile cleanup conflicts with an ACPI build fix; the intel_dp.c
conflict is trivial.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
46 files changed:
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_cmd_parser.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_ums.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
include/drm/drm_crtc.h
include/drm/drm_dp_helper.h
include/drm/drm_fb_helper.h

index 3b7d32da16046ffcdc238ba41b1b03a3ec152435..35ea15d5ffffed35a4f494afe6df9a1a4e89a8ae 100644 (file)
@@ -215,6 +215,16 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
        { DRM_MODE_ENCODER_DSI, "DSI" },
 };
 
+static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
+{
+       { SubPixelUnknown, "Unknown" },
+       { SubPixelHorizontalRGB, "Horizontal RGB" },
+       { SubPixelHorizontalBGR, "Horizontal BGR" },
+       { SubPixelVerticalRGB, "Vertical RGB" },
+       { SubPixelVerticalBGR, "Vertical BGR" },
+       { SubPixelNone, "None" },
+};
+
 void drm_connector_ida_init(void)
 {
        int i;
@@ -264,6 +274,19 @@ const char *drm_get_connector_status_name(enum drm_connector_status status)
 }
 EXPORT_SYMBOL(drm_get_connector_status_name);
 
+/**
+ * drm_get_subpixel_order_name - return a string for a given subpixel enum
+ * @order: enum of subpixel_order
+ *
+ * Note that this does no bounds checking, so passing an out-of-range value
+ * is a caller error; no unscrubbed user data should ever make it here.
+ */
+const char *drm_get_subpixel_order_name(enum subpixel_order order)
+{
+       return drm_subpixel_enum_list[order].name;
+}
+EXPORT_SYMBOL(drm_get_subpixel_order_name);
+
 static char printable_char(int c)
 {
        return isascii(c) && isprint(c) ? c : '?';
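
The new helper is exercised later in this same commit, in the i915 connector
debugfs code; a minimal sketch of a caller, assuming a seq_file m and a
connector whose subpixel_order was set by the driver (never by unscrubbed
user data):

    /* Sketch: print a connector's subpixel layout via the new helper. */
    seq_printf(m, "\tsubpixel order: %s\n",
               drm_get_subpixel_order_name(connector->display_info.subpixel_order));
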
index 98a03639b413d834d08c5c73c0ef08cec3d37c55..d99df15a78bc3be2c8ea9e154ba8d590b90ad3a3 100644 (file)
@@ -1136,7 +1136,7 @@ static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
        return count;
 }
 
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
 {
        struct drm_display_mode *mode;
 
@@ -1149,6 +1149,7 @@ static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_conn
        }
        return NULL;
 }
+EXPORT_SYMBOL(drm_has_preferred_mode);
 
 static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
 {
@@ -1157,7 +1158,7 @@ static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
        return cmdline_mode->specified;
 }
 
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
                                                      int width, int height)
 {
        struct drm_cmdline_mode *cmdline_mode;
@@ -1197,6 +1198,7 @@ create_mode:
        list_add(&mode->head, &fb_helper_conn->connector->modes);
        return mode;
 }
+EXPORT_SYMBOL(drm_pick_cmdline_mode);
 
 static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
 {
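
With drm_has_preferred_mode() and drm_pick_cmdline_mode() now exported, a
driver's fbdev setup code can do its own initial mode selection. A minimal
sketch, assuming a populated drm_fb_helper_connector; the wrapper name is
invented for illustration:

    /* Prefer an explicit kernel-command-line mode, then fall back to the
     * connector's preferred mode; returns NULL if neither exists. */
    static struct drm_display_mode *
    pick_initial_mode(struct drm_fb_helper_connector *conn, int width, int height)
    {
            struct drm_display_mode *mode;

            mode = drm_pick_cmdline_mode(conn, width, height);
            if (!mode)
                    mode = drm_has_preferred_mode(conn, width, height);
            return mode;
    }
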
index 9fd44f5f3b3b40ff5f1acf497b613a36e153cac2..b1445b73465be0642aa0904df983183f0ae42135 100644 (file)
@@ -3,57 +3,69 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o \
-         i915_gpu_error.o \
+
+# Please keep these build lists sorted!
+
+# core driver code
+i915-y := i915_drv.o \
+         i915_params.o \
           i915_suspend.o \
-         i915_gem.o \
+         i915_sysfs.o \
+         intel_pm.o
+i915-$(CONFIG_COMPAT)   += i915_ioc32.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
+# GEM code
+i915-y += i915_cmd_parser.o \
          i915_gem_context.o \
          i915_gem_debug.o \
+         i915_gem_dmabuf.o \
          i915_gem_evict.o \
          i915_gem_execbuffer.o \
          i915_gem_gtt.o \
+         i915_gem.o \
          i915_gem_stolen.o \
          i915_gem_tiling.o \
-         i915_sysfs.o \
+         i915_gpu_error.o \
+         i915_irq.o \
          i915_trace_points.o \
-         i915_ums.o \
+         intel_ringbuffer.o \
+         intel_uncore.o
+
+# modesetting core code
+i915-y += intel_bios.o \
          intel_display.o \
-         intel_crt.o \
-         intel_lvds.o \
-         intel_dsi.o \
-         intel_dsi_cmd.o \
-         intel_dsi_pll.o \
-         intel_bios.o \
-         intel_ddi.o \
-         intel_dp.o \
-         intel_hdmi.o \
-         intel_sdvo.o \
          intel_modes.o \
-         intel_panel.o \
-         intel_pm.o \
-         intel_i2c.o \
-         intel_tv.o \
-         intel_dvo.o \
-         intel_ringbuffer.o \
          intel_overlay.o \
-         intel_sprite.o \
          intel_sideband.o \
-         intel_uncore.o \
+         intel_sprite.o
+i915-$(CONFIG_ACPI)            += intel_acpi.o intel_opregion.o
+i915-$(CONFIG_DRM_I915_FBDEV)  += intel_fbdev.o
+
+# modesetting output/encoder code
+i915-y += dvo_ch7017.o \
          dvo_ch7xxx.o \
-         dvo_ch7017.o \
          dvo_ivch.o \
-         dvo_tfp410.o \
-         dvo_sil164.o \
          dvo_ns2501.o \
-         i915_gem_dmabuf.o
-
-i915-$(CONFIG_COMPAT)   += i915_ioc32.o
-
-i915-$(CONFIG_ACPI)    += intel_acpi.o intel_opregion.o
-
-i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+         dvo_sil164.o \
+         dvo_tfp410.o \
+         intel_crt.o \
+         intel_ddi.o \
+         intel_dp.o \
+         intel_dsi_cmd.o \
+         intel_dsi.o \
+         intel_dsi_pll.o \
+         intel_dvo.o \
+         intel_hdmi.o \
+         intel_i2c.o \
+         intel_lvds.o \
+         intel_panel.o \
+         intel_sdvo.o \
+         intel_tv.o
 
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+# legacy horrors
+i915-y += i915_dma.o \
+         i915_ums.o
 
 obj-$(CONFIG_DRM_I915)  += i915.o
 
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
new file mode 100644 (file)
index 0000000..7a5756e
--- /dev/null
@@ -0,0 +1,485 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Brad Volkin <bradley.d.volkin@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+
+/**
+ * DOC: i915 batch buffer command parser
+ *
+ * Motivation:
+ * Certain OpenGL features (e.g. transform feedback, performance monitoring)
+ * require userspace code to submit batches containing commands such as
+ * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
+ * generations of the hardware will noop these commands in "unsecure" batches
+ * (which includes all userspace batches submitted via i915) even though the
+ * commands may be safe and represent the intended programming model of the
+ * device.
+ *
+ * The software command parser is similar in operation to the command parsing
+ * done in hardware for unsecure batches. However, the software parser allows
+ * some operations that would be noop'd by hardware, if the parser determines
+ * the operation is safe, and submits the batch as "secure" to prevent hardware
+ * parsing.
+ *
+ * Threats:
+ * At a high level, the hardware (and software) checks attempt to prevent
+ * granting userspace undue privileges. There are three categories of privilege.
+ *
+ * First, commands which are explicitly defined as privileged or which should
+ * only be used by the kernel driver. The parser generally rejects such
+ * commands, though it may allow some from the drm master process.
+ *
+ * Second, commands which access registers. To support correct/enhanced
+ * userspace functionality, particularly certain OpenGL extensions, the parser
+ * provides a whitelist of registers which userspace may safely access (for both
+ * normal and drm master processes).
+ *
+ * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
+ * The parser always rejects such commands.
+ *
+ * The majority of the problematic commands fall in the MI_* range, with only a
+ * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
+ *
+ * Implementation:
+ * Each ring maintains tables of commands and registers which the parser uses in
+ * scanning batch buffers submitted to that ring.
+ *
+ * Since the set of commands that the parser must check for is significantly
+ * smaller than the number of commands supported, the parser tables contain only
+ * those commands required by the parser. This generally works because command
+ * opcode ranges have standard command length encodings. So for commands that
+ * the parser does not need to check, it can easily skip them. This is
+ * implemented via a per-ring length decoding vfunc.
+ *
+ * Unfortunately, there are a number of commands that do not follow the standard
+ * length encoding for their opcode range, primarily amongst the MI_* commands.
+ * To handle this, the parser provides a way to define explicit "skip" entries
+ * in the per-ring command tables.
+ *
+ * Other command table entries map fairly directly to high level categories
+ * mentioned above: rejected, master-only, register whitelist. The parser
+ * implements a number of checks, including the privileged memory checks, via a
+ * general bitmasking mechanism.
+ */
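
As a self-contained illustration of the masked matching described above
(standalone C with invented names; the kernel's own lookup is
find_cmd_in_table() further down):

    #include <stdbool.h>
    #include <stdint.h>

    struct example_desc {
            uint32_t mask;  /* header bits that identify the command */
            uint32_t value; /* canonical opcode bits under that mask */
    };

    /* A command matches a descriptor when its header, under the
     * descriptor's mask, equals the descriptor's canonical value. */
    static bool example_matches(const struct example_desc *d, uint32_t header)
    {
            return (header & d->mask) == (d->value & d->mask);
    }
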
+
+static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
+{
+       u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+       u32 subclient =
+               (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+       if (client == INSTR_MI_CLIENT)
+               return 0x3F;
+       else if (client == INSTR_RC_CLIENT) {
+               if (subclient == INSTR_MEDIA_SUBCLIENT)
+                       return 0xFFFF;
+               else
+                       return 0xFF;
+       }
+
+       DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
+       return 0;
+}
+
+static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
+{
+       u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+       u32 subclient =
+               (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+       if (client == INSTR_MI_CLIENT)
+               return 0x3F;
+       else if (client == INSTR_RC_CLIENT) {
+               if (subclient == INSTR_MEDIA_SUBCLIENT)
+                       return 0xFFF;
+               else
+                       return 0xFF;
+       }
+
+       DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
+       return 0;
+}
+
+static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
+{
+       u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+
+       if (client == INSTR_MI_CLIENT)
+               return 0x3F;
+       else if (client == INSTR_BC_CLIENT)
+               return 0xFF;
+
+       DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+       return 0;
+}
+
+static void validate_cmds_sorted(struct intel_ring_buffer *ring)
+{
+       int i;
+
+       if (!ring->cmd_tables || ring->cmd_table_count == 0)
+               return;
+
+       for (i = 0; i < ring->cmd_table_count; i++) {
+               const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
+               u32 previous = 0;
+               int j;
+
+               for (j = 0; j < table->count; j++) {
+                       const struct drm_i915_cmd_descriptor *desc =
+                               &table->table[j];
+                       u32 curr = desc->cmd.value & desc->cmd.mask;
+
+                       if (curr < previous)
+                               DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+                                         ring->id, i, j, curr, previous);
+
+                       previous = curr;
+               }
+       }
+}
+
+static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+{
+       int i;
+       u32 previous = 0;
+
+       for (i = 0; i < reg_count; i++) {
+               u32 curr = reg_table[i];
+
+               if (curr < previous)
+                       DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
+                                 ring_id, i, curr, previous);
+
+               previous = curr;
+       }
+}
+
+static void validate_regs_sorted(struct intel_ring_buffer *ring)
+{
+       check_sorted(ring->id, ring->reg_table, ring->reg_count);
+       check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
+}
+
+/**
+ * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
+ * @ring: the ringbuffer to initialize
+ *
+ * Optionally initializes fields related to batch buffer command parsing in the
+ * struct intel_ring_buffer based on whether the platform requires software
+ * command parsing.
+ */
+void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
+{
+       if (!IS_GEN7(ring->dev))
+               return;
+
+       switch (ring->id) {
+       case RCS:
+               ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+               break;
+       case VCS:
+               ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+               break;
+       case BCS:
+               ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+               break;
+       case VECS:
+               /* VECS can use the same length_mask function as VCS */
+               ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+               break;
+       default:
+               DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
+                         ring->id);
+               BUG();
+       }
+
+       validate_cmds_sorted(ring);
+       validate_regs_sorted(ring);
+}
+
+static const struct drm_i915_cmd_descriptor*
+find_cmd_in_table(const struct drm_i915_cmd_table *table,
+                 u32 cmd_header)
+{
+       int i;
+
+       for (i = 0; i < table->count; i++) {
+               const struct drm_i915_cmd_descriptor *desc = &table->table[i];
+               u32 masked_cmd = desc->cmd.mask & cmd_header;
+               u32 masked_value = desc->cmd.value & desc->cmd.mask;
+
+               if (masked_cmd == masked_value)
+                       return desc;
+       }
+
+       return NULL;
+}
+
+/*
+ * Returns a pointer to a descriptor for the command specified by cmd_header.
+ *
+ * The caller must supply space for a default descriptor via the default_desc
+ * parameter. If no descriptor for the specified command exists in the ring's
+ * command parser tables, this function fills in default_desc based on the
+ * ring's default length encoding and returns default_desc.
+ */
+static const struct drm_i915_cmd_descriptor*
+find_cmd(struct intel_ring_buffer *ring,
+        u32 cmd_header,
+        struct drm_i915_cmd_descriptor *default_desc)
+{
+       u32 mask;
+       int i;
+
+       for (i = 0; i < ring->cmd_table_count; i++) {
+               const struct drm_i915_cmd_descriptor *desc;
+
+               desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
+               if (desc)
+                       return desc;
+       }
+
+       mask = ring->get_cmd_length_mask(cmd_header);
+       if (!mask)
+               return NULL;
+
+       BUG_ON(!default_desc);
+       default_desc->flags = CMD_DESC_SKIP;
+       default_desc->length.mask = mask;
+
+       return default_desc;
+}
+
+static bool valid_reg(const u32 *table, int count, u32 addr)
+{
+       if (table && count != 0) {
+               int i;
+
+               for (i = 0; i < count; i++) {
+                       if (table[i] == addr)
+                               return true;
+               }
+       }
+
+       return false;
+}
+
+static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+{
+       int i;
+       void *addr = NULL;
+       struct sg_page_iter sg_iter;
+       struct page **pages;
+
+       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+       if (pages == NULL) {
+               DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+               goto finish;
+       }
+
+       i = 0;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+               pages[i] = sg_page_iter_page(&sg_iter);
+               i++;
+       }
+
+       addr = vmap(pages, i, 0, PAGE_KERNEL);
+       if (addr == NULL) {
+               DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+               goto finish;
+       }
+
+finish:
+       if (pages)
+               drm_free_large(pages);
+       return (u32 *)addr;
+}
+
+/**
+ * i915_needs_cmd_parser() - should a given ring use software command parsing?
+ * @ring: the ring in question
+ *
+ * Only certain platforms require software batch buffer command parsing, and
+ * only when enabled via a module parameter.
+ *
+ * Return: true if the ring requires software command parsing
+ */
+bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
+{
+       /* No command tables indicates a platform without parsing */
+       if (!ring->cmd_tables)
+               return false;
+
+       return (i915.enable_cmd_parser == 1);
+}
+
+#define LENGTH_BIAS 2
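
A worked example of the decode this bias feeds into: an MI-client header
whose low six bits read 0x01 is matched by the 0x3F render mask defined
earlier, giving (0x01 & 0x3F) + LENGTH_BIAS = 3 total dwords, so the parser
advances cmd by three once its checks pass.
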
+
+/**
+ * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ring: the ring on which the batch is to execute
+ * @batch_obj: the batch buffer in question
+ * @batch_start_offset: byte offset in the batch at which execution starts
+ * @is_master: is the submitting process the drm master?
+ *
+ * Parses the specified batch buffer looking for privilege violations as
+ * described in the overview.
+ *
+ * Return: non-zero if the parser finds violations or otherwise fails
+ */
+int i915_parse_cmds(struct intel_ring_buffer *ring,
+                   struct drm_i915_gem_object *batch_obj,
+                   u32 batch_start_offset,
+                   bool is_master)
+{
+       int ret = 0;
+       u32 *cmd, *batch_base, *batch_end;
+       struct drm_i915_cmd_descriptor default_desc = { 0 };
+       int needs_clflush = 0;
+
+       ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+       if (ret) {
+               DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+               return ret;
+       }
+
+       batch_base = vmap_batch(batch_obj);
+       if (!batch_base) {
+               DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+               i915_gem_object_unpin_pages(batch_obj);
+               return -ENOMEM;
+       }
+
+       if (needs_clflush)
+               drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
+
+       cmd = batch_base + (batch_start_offset / sizeof(*cmd));
+       batch_end = batch_base + (batch_obj->base.size / sizeof(*batch_end));
+
+       while (cmd < batch_end) {
+               const struct drm_i915_cmd_descriptor *desc;
+               u32 length;
+
+               if (*cmd == MI_BATCH_BUFFER_END)
+                       break;
+
+               desc = find_cmd(ring, *cmd, &default_desc);
+               if (!desc) {
+                       DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
+                                        *cmd);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (desc->flags & CMD_DESC_FIXED)
+                       length = desc->length.fixed;
+               else
+                       length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
+
+               if ((batch_end - cmd) < length) {
+                       DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%ld\n",
+                                        *cmd,
+                                        length,
+                                        batch_end - cmd);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (desc->flags & CMD_DESC_REJECT) {
+                       DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+                       DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+                                        *cmd);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (desc->flags & CMD_DESC_REGISTER) {
+                       u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
+
+                       if (!valid_reg(ring->reg_table,
+                                      ring->reg_count, reg_addr)) {
+                               if (!is_master ||
+                                   !valid_reg(ring->master_reg_table,
+                                              ring->master_reg_count,
+                                              reg_addr)) {
+                                       DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+                                                        reg_addr,
+                                                        *cmd,
+                                                        ring->id);
+                                       ret = -EINVAL;
+                                       break;
+                               }
+                       }
+               }
+
+               if (desc->flags & CMD_DESC_BITMASK) {
+                       int i;
+
+                       for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
+                               u32 dword;
+
+                               if (desc->bits[i].mask == 0)
+                                       break;
+
+                               dword = cmd[desc->bits[i].offset] &
+                                       desc->bits[i].mask;
+
+                               if (dword != desc->bits[i].expected) {
+                                       DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+                                                        *cmd,
+                                                        desc->bits[i].mask,
+                                                        desc->bits[i].expected,
+                                                        dword, ring->id);
+                                       ret = -EINVAL;
+                                       break;
+                               }
+                       }
+
+                       if (ret)
+                               break;
+               }
+
+               cmd += length;
+       }
+
+       if (cmd >= batch_end) {
+               DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+               ret = -EINVAL;
+       }
+
+       vunmap(batch_base);
+
+       i915_gem_object_unpin_pages(batch_obj);
+
+       return ret;
+}
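
The intended call shape lives in the execbuffer path (i915_gem_execbuffer.c
appears in this commit's file list, though its hunk is not shown here); a
hedged sketch of that call site, not the verbatim hunk:

    if (i915_needs_cmd_parser(ring)) {
            ret = i915_parse_cmds(ring, batch_obj, batch_start_offset,
                                  is_master);
            if (ret)
                    return ret;
            /* On success the batch can be submitted as "secure", bypassing
             * the hardware checks as described in the DOC comment above. */
    }
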
index b2b46c52294c6d4d9c2a20890fafaa4b122e7fa3..a90d31c786437b246890298a8b14f2ace9dec302 100644 (file)
@@ -98,7 +98,7 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
        if (obj->user_pin_count > 0)
                return "P";
-       else if (obj->pin_count > 0)
+       else if (i915_gem_obj_is_pinned(obj))
                return "p";
        else
                return " ";
@@ -123,6 +123,8 @@ static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
+       int pin_count = 0;
+
        seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
                   &obj->base,
                   get_pin_flag(obj),
@@ -139,8 +141,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
-       if (obj->pin_count)
-               seq_printf(m, " (pinned x %d)", obj->pin_count);
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               if (vma->pin_count > 0)
+                       pin_count++;
+       seq_printf(m, " (pinned x %d)", pin_count);
        if (obj->pin_display)
                seq_printf(m, " (display)");
        if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -447,7 +451,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 
        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (list == PINNED_LIST && obj->pin_count == 0)
+               if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
                        continue;
 
                seq_puts(m, "   ");
@@ -598,7 +602,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
        intel_runtime_pm_get(dev_priv);
 
        if (INTEL_INFO(dev)->gen >= 8) {
-               int i;
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));
 
@@ -611,16 +614,16 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                                   i, I915_READ(GEN8_GT_IER(i)));
                }
 
-               for_each_pipe(i) {
+               for_each_pipe(pipe) {
                        seq_printf(m, "Pipe %c IMR:\t%08x\n",
-                                  pipe_name(i),
-                                  I915_READ(GEN8_DE_PIPE_IMR(i)));
+                                  pipe_name(pipe),
+                                  I915_READ(GEN8_DE_PIPE_IMR(pipe)));
                        seq_printf(m, "Pipe %c IIR:\t%08x\n",
-                                  pipe_name(i),
-                                  I915_READ(GEN8_DE_PIPE_IIR(i)));
+                                  pipe_name(pipe),
+                                  I915_READ(GEN8_DE_PIPE_IIR(pipe)));
                        seq_printf(m, "Pipe %c IER:\t%08x\n",
-                                  pipe_name(i),
-                                  I915_READ(GEN8_DE_PIPE_IER(i)));
+                                  pipe_name(pipe),
+                                  I915_READ(GEN8_DE_PIPE_IER(pipe)));
                }
 
                seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -712,8 +715,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                seq_printf(m, "Graphics Interrupt mask:         %08x\n",
                           I915_READ(GTIMR));
        }
-       seq_printf(m, "Interrupts received: %d\n",
-                  atomic_read(&dev_priv->irq_received));
        for_each_ring(ring, dev_priv, i) {
                if (INTEL_INFO(dev)->gen >= 6) {
                        seq_printf(m,
@@ -1346,6 +1347,8 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
                return 0;
        }
 
+       intel_runtime_pm_get(dev_priv);
+
        if (intel_fbc_enabled(dev)) {
                seq_puts(m, "FBC enabled\n");
        } else {
@@ -1389,6 +1392,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
                }
                seq_putc(m, '\n');
        }
+
+       intel_runtime_pm_put(dev_priv);
+
        return 0;
 }
 
@@ -1403,11 +1409,15 @@ static int i915_ips_status(struct seq_file *m, void *unused)
                return 0;
        }
 
+       intel_runtime_pm_get(dev_priv);
+
        if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
                seq_puts(m, "enabled\n");
        else
                seq_puts(m, "disabled\n");
 
+       intel_runtime_pm_put(dev_priv);
+
        return 0;
 }
 
@@ -1418,6 +1428,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;
 
+       intel_runtime_pm_get(dev_priv);
+
        if (HAS_PCH_SPLIT(dev))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
        else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
@@ -1427,6 +1439,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
 
+       intel_runtime_pm_put(dev_priv);
+
        seq_printf(m, "self-refresh: %s\n",
                   sr_enabled ? "enabled" : "disabled");
 
@@ -1466,7 +1480,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret;
+       int ret = 0;
        int gpu_freq, ia_freq;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
@@ -1474,12 +1488,13 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                return 0;
        }
 
+       intel_runtime_pm_get(dev_priv);
+
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
-               return ret;
-       intel_runtime_pm_get(dev_priv);
+               goto out;
 
        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
@@ -1496,10 +1511,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                           ((ia_freq >> 8) & 0xff) * 100);
        }
 
-       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       return 0;
+out:
+       intel_runtime_pm_put(dev_priv);
+       return ret;
 }
 
 static int i915_gfxec(struct seq_file *m, void *unused)
@@ -1733,6 +1749,17 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
        return 0;
 }
 
+static int per_file_ctx(int id, void *ptr, void *data)
+{
+       struct i915_hw_context *ctx = ptr;
+       struct seq_file *m = data;
+       struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+
+       ppgtt->debug_dump(ppgtt, m);
+
+       return 0;
+}
+
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1744,7 +1771,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
                return;
 
        seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
-       seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
+       seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
        for_each_ring(ring, dev_priv, unused) {
                seq_printf(m, "%s\n", ring->name);
                for (i = 0; i < 4; i++) {
@@ -1762,6 +1789,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
+       struct drm_file *file;
        int i;
 
        if (INTEL_INFO(dev)->gen == 6)
@@ -1780,6 +1808,20 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
                seq_puts(m, "aliasing PPGTT:\n");
                seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+
+               ppgtt->debug_dump(ppgtt, m);
+       } else
+               return;
+
+       list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+               struct drm_i915_file_private *file_priv = file->driver_priv;
+               struct i915_hw_ppgtt *pvt_ppgtt;
+
+               pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
+               seq_printf(m, "proc: %s\n",
+                          get_pid_task(file->pid, PIDTYPE_PID)->comm);
+               seq_puts(m, "  default context:\n");
+               idr_for_each(&file_priv->context_idr, per_file_ctx, m);
        }
        seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
 }
@@ -1892,6 +1934,47 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
        return 0;
 }
 
+static int i915_sink_crc(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct intel_encoder *encoder;
+       struct intel_connector *connector;
+       struct intel_dp *intel_dp = NULL;
+       int ret;
+       u8 crc[6];
+
+       drm_modeset_lock_all(dev);
+       list_for_each_entry(connector, &dev->mode_config.connector_list,
+                           base.head) {
+
+               if (connector->base.dpms != DRM_MODE_DPMS_ON)
+                       continue;
+
+               if (!connector->base.encoder)
+                       continue;
+
+               encoder = to_intel_encoder(connector->base.encoder);
+               if (encoder->type != INTEL_OUTPUT_EDP)
+                       continue;
+
+               intel_dp = enc_to_intel_dp(&encoder->base);
+
+               ret = intel_dp_sink_crc(intel_dp, crc);
+               if (ret)
+                       goto out;
+
+               seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
+                          crc[0], crc[1], crc[2],
+                          crc[3], crc[4], crc[5]);
+               goto out;
+       }
+       ret = -ENODEV;
+out:
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
+
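
The node is registered below as "i915_sink_crc_eDP1". A hedged userspace
sketch for reading it, assuming the conventional debugfs mount point and
DRM minor 0:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char crc[16] = { 0 };
            int fd = open("/sys/kernel/debug/dri/0/i915_sink_crc_eDP1",
                          O_RDONLY);

            if (fd < 0)
                    return 1;
            if (read(fd, crc, sizeof(crc) - 1) > 0)
                    printf("eDP sink CRC: %s", crc); /* six hex bytes */
            close(fd);
            return 0;
    }
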
 static int i915_energy_uJ(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
@@ -1903,12 +1986,16 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
        if (INTEL_INFO(dev)->gen < 6)
                return -ENODEV;
 
+       intel_runtime_pm_get(dev_priv);
+
        rdmsrl(MSR_RAPL_POWER_UNIT, power);
        power = (power & 0x1f00) >> 8;
        units = 1000000 / (1 << power); /* convert to uJ */
        power = I915_READ(MCH_SECP_NRG_STTS);
        power *= units;
 
+       intel_runtime_pm_put(dev_priv);
+
        seq_printf(m, "%llu", (long long unsigned)power);
 
        return 0;
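
A worked example of the unit conversion above: the energy-status-unit field
sits in bits 12:8 of MSR_RAPL_POWER_UNIT, so for a field value of, say, 16,
units = 1000000 / (1 << 16) = 15 uJ per counter tick, and the printed result
is the raw MCH_SECP_NRG_STTS count scaled by that factor.
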
@@ -1928,7 +2015,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
        mutex_lock(&dev_priv->pc8.lock);
        seq_printf(m, "Requirements met: %s\n",
                   yesno(dev_priv->pc8.requirements_met));
-       seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
+       seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
        seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
        seq_printf(m, "IRQs disabled: %s\n",
                   yesno(dev_priv->pc8.irqs_disabled));
@@ -1961,6 +2048,28 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
                return "TRANSCODER_C";
        case POWER_DOMAIN_TRANSCODER_EDP:
                return "TRANSCODER_EDP";
+       case POWER_DOMAIN_PORT_DDI_A_2_LANES:
+               return "PORT_DDI_A_2_LANES";
+       case POWER_DOMAIN_PORT_DDI_A_4_LANES:
+               return "PORT_DDI_A_4_LANES";
+       case POWER_DOMAIN_PORT_DDI_B_2_LANES:
+               return "PORT_DDI_B_2_LANES";
+       case POWER_DOMAIN_PORT_DDI_B_4_LANES:
+               return "PORT_DDI_B_4_LANES";
+       case POWER_DOMAIN_PORT_DDI_C_2_LANES:
+               return "PORT_DDI_C_2_LANES";
+       case POWER_DOMAIN_PORT_DDI_C_4_LANES:
+               return "PORT_DDI_C_4_LANES";
+       case POWER_DOMAIN_PORT_DDI_D_2_LANES:
+               return "PORT_DDI_D_2_LANES";
+       case POWER_DOMAIN_PORT_DDI_D_4_LANES:
+               return "PORT_DDI_D_4_LANES";
+       case POWER_DOMAIN_PORT_DSI:
+               return "PORT_DSI";
+       case POWER_DOMAIN_PORT_CRT:
+               return "PORT_CRT";
+       case POWER_DOMAIN_PORT_OTHER:
+               return "PORT_OTHER";
        case POWER_DOMAIN_VGA:
                return "VGA";
        case POWER_DOMAIN_AUDIO:
@@ -2008,6 +2117,168 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
        return 0;
 }
 
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+                                struct drm_display_mode *mode)
+{
+       int i;
+
+       for (i = 0; i < tabs; i++)
+               seq_putc(m, '\t');
+
+       seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
+                  mode->base.id, mode->name,
+                  mode->vrefresh, mode->clock,
+                  mode->hdisplay, mode->hsync_start,
+                  mode->hsync_end, mode->htotal,
+                  mode->vdisplay, mode->vsync_start,
+                  mode->vsync_end, mode->vtotal,
+                  mode->type, mode->flags);
+}
+
+static void intel_encoder_info(struct seq_file *m,
+                              struct intel_crtc *intel_crtc,
+                              struct intel_encoder *intel_encoder)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_crtc *crtc = &intel_crtc->base;
+       struct intel_connector *intel_connector;
+       struct drm_encoder *encoder;
+
+       encoder = &intel_encoder->base;
+       seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
+                  encoder->base.id, drm_get_encoder_name(encoder));
+       for_each_connector_on_encoder(dev, encoder, intel_connector) {
+               struct drm_connector *connector = &intel_connector->base;
+               seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
+                          connector->base.id,
+                          drm_get_connector_name(connector),
+                          drm_get_connector_status_name(connector->status));
+               if (connector->status == connector_status_connected) {
+                       struct drm_display_mode *mode = &crtc->mode;
+                       seq_printf(m, ", mode:\n");
+                       intel_seq_print_mode(m, 2, mode);
+               } else {
+                       seq_putc(m, '\n');
+               }
+       }
+}
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_crtc *crtc = &intel_crtc->base;
+       struct intel_encoder *intel_encoder;
+
+       seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+                  crtc->fb->base.id, crtc->x, crtc->y,
+                  crtc->fb->width, crtc->fb->height);
+       for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+               intel_encoder_info(m, intel_crtc, intel_encoder);
+}
+
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+{
+       struct drm_display_mode *mode = panel->fixed_mode;
+
+       seq_printf(m, "\tfixed mode:\n");
+       intel_seq_print_mode(m, 2, mode);
+}
+
+static void intel_dp_info(struct seq_file *m,
+                         struct intel_connector *intel_connector)
+{
+       struct intel_encoder *intel_encoder = intel_connector->encoder;
+       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+       seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+       seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
+                  "no");
+       if (intel_encoder->type == INTEL_OUTPUT_EDP)
+               intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+                           struct intel_connector *intel_connector)
+{
+       struct intel_encoder *intel_encoder = intel_connector->encoder;
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+
+       seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
+                  "no");
+}
+
+static void intel_lvds_info(struct seq_file *m,
+                           struct intel_connector *intel_connector)
+{
+       intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_connector_info(struct seq_file *m,
+                                struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_encoder *intel_encoder = intel_connector->encoder;
+       struct drm_display_mode *mode;
+
+       seq_printf(m, "connector %d: type %s, status: %s\n",
+                  connector->base.id, drm_get_connector_name(connector),
+                  drm_get_connector_status_name(connector->status));
+       if (connector->status == connector_status_connected) {
+               seq_printf(m, "\tname: %s\n", connector->display_info.name);
+               seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+                          connector->display_info.width_mm,
+                          connector->display_info.height_mm);
+               seq_printf(m, "\tsubpixel order: %s\n",
+                          drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+               seq_printf(m, "\tCEA rev: %d\n",
+                          connector->display_info.cea_rev);
+       }
+       if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+           intel_encoder->type == INTEL_OUTPUT_EDP)
+               intel_dp_info(m, intel_connector);
+       else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+               intel_hdmi_info(m, intel_connector);
+       else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+               intel_lvds_info(m, intel_connector);
+
+       seq_printf(m, "\tmodes:\n");
+       list_for_each_entry(mode, &connector->modes, head)
+               intel_seq_print_mode(m, 2, mode);
+}
+
+static int i915_display_info(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+
+       drm_modeset_lock_all(dev);
+       seq_printf(m, "CRTC info\n");
+       seq_printf(m, "---------\n");
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+               seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+                          crtc->base.id, pipe_name(intel_crtc->pipe),
+                          intel_crtc->active ? "yes" : "no");
+               if (intel_crtc->active)
+                       intel_crtc_info(m, intel_crtc);
+       }
+
+       seq_printf(m, "\n");
+       seq_printf(m, "Connector info\n");
+       seq_printf(m, "--------------\n");
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               intel_connector_info(m, connector);
+       }
+       drm_modeset_unlock_all(dev);
+
+       return 0;
+}
+
 struct pipe_crc_info {
        const char *name;
        struct drm_device *dev;
@@ -2756,6 +3027,174 @@ static const struct file_operations i915_display_crc_ctl_fops = {
        .write = display_crc_ctl_write
 };
 
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+{
+       struct drm_device *dev = m->private;
+       int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+       int level;
+
+       drm_modeset_lock_all(dev);
+
+       for (level = 0; level < num_levels; level++) {
+               unsigned int latency = wm[level];
+
+               /* WM1+ latency values in 0.5us units */
+               if (level > 0)
+                       latency *= 5;
+
+               seq_printf(m, "WM%d %u (%u.%u usec)\n",
+                          level, wm[level],
+                          latency / 10, latency % 10);
+       }
+
+       drm_modeset_unlock_all(dev);
+}
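
A worked example of the formatting above: only WM1 and up are scaled, since
they are stored in 0.5 usec units. A stored WM1 value of 4 becomes 4 * 5 =
20 tenths of a usec and prints as "WM1 4 (2.0 usec)", while a WM0 value is
formatted as-is, i.e. already treated as tenths of a usec.
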
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+       struct drm_device *dev = m->private;
+
+       wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+
+       return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+       struct drm_device *dev = m->private;
+
+       wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+
+       return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+       struct drm_device *dev = m->private;
+
+       wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+
+       return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+       struct drm_device *dev = inode->i_private;
+
+       if (!HAS_PCH_SPLIT(dev))
+               return -ENODEV;
+
+       return single_open(file, pri_wm_latency_show, dev);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+       struct drm_device *dev = inode->i_private;
+
+       if (!HAS_PCH_SPLIT(dev))
+               return -ENODEV;
+
+       return single_open(file, spr_wm_latency_show, dev);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+       struct drm_device *dev = inode->i_private;
+
+       if (!HAS_PCH_SPLIT(dev))
+               return -ENODEV;
+
+       return single_open(file, cur_wm_latency_show, dev);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+                               size_t len, loff_t *offp, uint16_t wm[5])
+{
+       struct seq_file *m = file->private_data;
+       struct drm_device *dev = m->private;
+       uint16_t new[5] = { 0 };
+       int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+       int level;
+       int ret;
+       char tmp[32];
+
+       if (len >= sizeof(tmp))
+               return -EINVAL;
+
+       if (copy_from_user(tmp, ubuf, len))
+               return -EFAULT;
+
+       tmp[len] = '\0';
+
+       ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+       if (ret != num_levels)
+               return -EINVAL;
+
+       drm_modeset_lock_all(dev);
+
+       for (level = 0; level < num_levels; level++)
+               wm[level] = new[level];
+
+       drm_modeset_unlock_all(dev);
+
+       return len;
+}
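
A hedged userspace sketch for the write side, assuming the standard debugfs
path; note the sscanf check above demands exactly num_levels values, so five
on HSW/BDW and four elsewhere:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *vals = "2 4 4 4 4"; /* WM0..WM4, raw units */
            int fd = open("/sys/kernel/debug/dri/0/i915_pri_wm_latency",
                          O_WRONLY);

            if (fd < 0)
                    return 1;
            if (write(fd, vals, strlen(vals)) < 0)
                    return 1;
            close(fd);
            return 0;
    }
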
+
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+                                   size_t len, loff_t *offp)
+{
+       struct seq_file *m = file->private_data;
+       struct drm_device *dev = m->private;
+
+       return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+                                   size_t len, loff_t *offp)
+{
+       struct seq_file *m = file->private_data;
+       struct drm_device *dev = m->private;
+
+       return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+                                   size_t len, loff_t *offp)
+{
+       struct seq_file *m = file->private_data;
+       struct drm_device *dev = m->private;
+
+       return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+       .owner = THIS_MODULE,
+       .open = pri_wm_latency_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+       .owner = THIS_MODULE,
+       .open = spr_wm_latency_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+       .owner = THIS_MODULE,
+       .open = cur_wm_latency_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = cur_wm_latency_write
+};
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -2772,9 +3211,8 @@ i915_wedged_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
 
-       DRM_INFO("Manually setting wedged to %llu\n", val);
-       i915_handle_error(dev, val);
-
+       i915_handle_error(dev, val,
+                         "Manually setting wedged to %llu", val);
        return 0;
 }
 
@@ -2929,7 +3367,7 @@ i915_drop_caches_set(void *data, u64 val)
                list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
                        list_for_each_entry_safe(vma, x, &vm->inactive_list,
                                                 mm_list) {
-                               if (vma->obj->pin_count)
+                               if (vma->pin_count)
                                        continue;
 
                                ret = i915_vma_unbind(vma);
@@ -2989,6 +3427,7 @@ i915_max_freq_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 rp_state_cap, hw_max, hw_min;
        int ret;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3007,14 +3446,29 @@ i915_max_freq_set(void *data, u64 val)
         */
        if (IS_VALLEYVIEW(dev)) {
                val = vlv_freq_opcode(dev_priv, val);
-               dev_priv->rps.max_delay = val;
-               valleyview_set_rps(dev, val);
+
+               hw_max = valleyview_rps_max_freq(dev_priv);
+               hw_min = valleyview_rps_min_freq(dev_priv);
        } else {
                do_div(val, GT_FREQUENCY_MULTIPLIER);
-               dev_priv->rps.max_delay = val;
-               gen6_set_rps(dev, val);
+
+               rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+               hw_max = dev_priv->rps.hw_max;
+               hw_min = (rp_state_cap >> 16) & 0xff;
+       }
+
+       if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+               mutex_unlock(&dev_priv->rps.hw_lock);
+               return -EINVAL;
        }
 
+       dev_priv->rps.max_delay = val;
+
+       if (IS_VALLEYVIEW(dev))
+               valleyview_set_rps(dev, val);
+       else
+               gen6_set_rps(dev, val);
+
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
@@ -3054,6 +3508,7 @@ i915_min_freq_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 rp_state_cap, hw_max, hw_min;
        int ret;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3072,13 +3527,29 @@ i915_min_freq_set(void *data, u64 val)
         */
        if (IS_VALLEYVIEW(dev)) {
                val = vlv_freq_opcode(dev_priv, val);
-               dev_priv->rps.min_delay = val;
-               valleyview_set_rps(dev, val);
+
+               hw_max = valleyview_rps_max_freq(dev_priv);
+               hw_min = valleyview_rps_min_freq(dev_priv);
        } else {
                do_div(val, GT_FREQUENCY_MULTIPLIER);
-               dev_priv->rps.min_delay = val;
-               gen6_set_rps(dev, val);
+
+               rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+               hw_max = dev_priv->rps.hw_max;
+               hw_min = (rp_state_cap >> 16) & 0xff;
+       }
+
+       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+               mutex_unlock(&dev_priv->rps.hw_lock);
+               return -EINVAL;
        }
+
+       dev_priv->rps.min_delay = val;
+
+       if (IS_VALLEYVIEW(dev))
+               valleyview_set_rps(dev, val);
+       else
+               gen6_set_rps(dev, val);
+
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
@@ -3248,9 +3719,11 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_dpio", i915_dpio_info, 0},
        {"i915_llc", i915_llc, 0},
        {"i915_edp_psr_status", i915_edp_psr_status, 0},
+       {"i915_sink_crc_eDP1", i915_sink_crc, 0},
        {"i915_energy_uJ", i915_energy_uJ, 0},
        {"i915_pc8_status", i915_pc8_status, 0},
        {"i915_power_domain_info", i915_power_domain_info, 0},
+       {"i915_display_info", i915_display_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
@@ -3269,6 +3742,9 @@ static const struct i915_debugfs_files {
        {"i915_error_state", &i915_error_state_fops},
        {"i915_next_seqno", &i915_next_seqno_fops},
        {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
+       {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+       {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+       {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
 };
 
 void intel_display_crc_init(struct drm_device *dev)
index 15a74f979b4bf8c773b6bde25c65f0badf6afb8e..e4d2b9f15ae29b3e69673ed2c726a4db948475d9 100644 (file)
@@ -626,9 +626,8 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-           master_priv->sarea_priv;
+       struct drm_i915_master_private *master_priv;
+       drm_i915_sarea_t *sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
        struct drm_clip_rect *cliprects = NULL;
@@ -636,6 +635,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;
 
+       master_priv = dev->primary->master->driver_priv;
+       sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
+
        if (!dev_priv->dri1.allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
@@ -682,9 +684,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-           master_priv->sarea_priv;
+       struct drm_i915_master_private *master_priv;
+       drm_i915_sarea_t *sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
@@ -696,6 +697,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;
 
+       master_priv = dev->primary->master->driver_priv;
+       sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
+
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
        if (cmdbuf->num_cliprects < 0)
@@ -990,7 +994,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = HAS_WT(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
-               value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+               value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
@@ -1317,12 +1321,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
        if (ret)
                goto cleanup_vga_switcheroo;
 
+       intel_power_domains_init_hw(dev_priv);
+
        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_gem_stolen;
 
-       intel_power_domains_init_hw(dev);
-
        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
@@ -1339,7 +1343,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = true;
        if (INTEL_INFO(dev)->num_pipes == 0) {
-               intel_display_power_put(dev, POWER_DOMAIN_VGA);
+               intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
                return 0;
        }
 
@@ -1374,10 +1378,10 @@ cleanup_gem:
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
-       i915_gem_cleanup_aliasing_ppgtt(dev);
+       WARN_ON(dev_priv->mm.aliasing_ppgtt);
        drm_mm_takedown(&dev_priv->gtt.base.mm);
 cleanup_power:
-       intel_display_power_put(dev, POWER_DOMAIN_VGA);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
        drm_irq_uninstall(dev);
 cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
@@ -1442,7 +1446,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 
 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 {
-       const struct intel_device_info *info = dev_priv->info;
+       const struct intel_device_info *info = &dev_priv->info;
 
 #define PRINT_S(name) "%s"
 #define SEP_EMPTY
@@ -1459,6 +1463,62 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 #undef SEP_COMMA
 }
 
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ *   - it's judged too laborious to fill n static structures with the limit
+ *     when a simple if statement does the job,
+ *   - run-time checks (e.g. reading fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ *   - after the MMIO has been setup as we are reading registers,
+ *   - after the PCH has been detected,
+ *   - before the first usage of the fields it can tweak.
+ */
+static void intel_device_info_runtime_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_device_info *info;
+       enum pipe pipe;
+
+       info = (struct intel_device_info *)&dev_priv->info;
+
+       if (IS_VALLEYVIEW(dev))
+               for_each_pipe(pipe)
+                       info->num_sprites[pipe] = 2;
+       else
+               for_each_pipe(pipe)
+                       info->num_sprites[pipe] = 1;
+
+       if (i915.disable_display) {
+               DRM_INFO("Display disabled (module parameter)\n");
+               info->num_pipes = 0;
+       } else if (info->num_pipes > 0 &&
+                  (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
+                  !IS_VALLEYVIEW(dev)) {
+               u32 fuse_strap = I915_READ(FUSE_STRAP);
+               u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+
+               /*
+                * SFUSE_STRAP is supposed to have a bit signalling the display
+                * is fused off. Unfortunately it seems that, at least in
+                * certain cases, a fused-off display means that PCH display
+                * register reads don't land anywhere, so we read back 0s.
+                *
+                * On CPT/PPT, we can detect this case because
+                * SFUSE_STRAP_FUSE_LOCK should be set when taking over after
+                * the firmware.
+                */
+               if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+                   sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+                   (dev_priv->pch_type == PCH_CPT &&
+                    !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+                       DRM_INFO("Display fused off, disabling\n");
+                       info->num_pipes = 0;
+               }
+       }
+}
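
Editor's note: since sprite counts now live per pipe in intel_device_info, iteration is bounded by num_sprites[pipe] rather than a single dev_priv->num_plane. A standalone sketch of the per-pipe loop this enables; the types are simplified and for_each_pipe/for_each_sprite are modelled here, not the driver macros:

	#include <stdio.h>

	#define MAX_PIPES 3

	struct device_info {
		int num_pipes;
		int num_sprites[MAX_PIPES];	/* filled at runtime, e.g. 2 on VLV */
	};

	int main(void)
	{
		struct device_info info = { .num_pipes = 2, .num_sprites = { 2, 2 } };
		int pipe, sprite;

		/* mirrors for_each_pipe()/for_each_sprite(): the sprite bound
		 * can differ per pipe instead of being a global plane count */
		for (pipe = 0; pipe < info.num_pipes; pipe++)
			for (sprite = 0; sprite < info.num_sprites[pipe]; sprite++)
				printf("pipe %c sprite %d\n", 'A' + pipe, sprite);
		return 0;
	}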
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1473,7 +1533,7 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
        struct drm_i915_private *dev_priv;
-       struct intel_device_info *info;
+       struct intel_device_info *info, *device_info;
        int ret = 0, mmio_bar, mmio_size;
        uint32_t aperture_size;
 
@@ -1496,7 +1556,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        dev->dev_private = (void *)dev_priv;
        dev_priv->dev = dev;
-       dev_priv->info = info;
+
+       /* copy initial configuration to dev_priv->info */
+       device_info = (struct intel_device_info *)&dev_priv->info;
+       *device_info = *info;
 
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
@@ -1635,9 +1698,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);
 
-       dev_priv->num_plane = 1;
-       if (IS_VALLEYVIEW(dev))
-               dev_priv->num_plane = 2;
+       intel_device_info_runtime_init(dev);
 
        if (INTEL_INFO(dev)->num_pipes) {
                ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
@@ -1645,7 +1706,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                        goto out_gem_unload;
        }
 
-       intel_power_domains_init(dev);
+       intel_power_domains_init(dev_priv);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = i915_load_modeset_init(dev);
@@ -1674,7 +1735,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        return 0;
 
 out_power_well:
-       intel_power_domains_remove(dev);
+       intel_power_domains_remove(dev_priv);
        drm_vblank_cleanup(dev);
 out_gem_unload:
        if (dev_priv->mm.inactive_shrinker.scan_objects)
@@ -1724,8 +1785,8 @@ int i915_driver_unload(struct drm_device *dev)
        /* The i915.ko module is still not prepared to be loaded when
         * the power well is not enabled, so just enable it in case
         * we're going to unload/reload. */
-       intel_display_set_init_power(dev, true);
-       intel_power_domains_remove(dev);
+       intel_display_set_init_power(dev_priv, true);
+       intel_power_domains_remove(dev_priv);
 
        i915_teardown_sysfs(dev);
 
@@ -1776,8 +1837,8 @@ int i915_driver_unload(struct drm_device *dev)
                i915_gem_free_all_phys_object(dev);
                i915_gem_cleanup_ringbuffer(dev);
                i915_gem_context_fini(dev);
+               WARN_ON(dev_priv->mm.aliasing_ppgtt);
                mutex_unlock(&dev->struct_mutex);
-               i915_gem_cleanup_aliasing_ppgtt(dev);
                i915_gem_cleanup_stolen(dev);
 
                if (!I915_NEED_GFX_HWS(dev))
index ec7bb0fc71bcfc8c19c2231c9bb0e3165f89e047..658fe24961ebfaa5ddac81043fd95c3cd26fa412 100644 (file)
 #include <linux/module.h>
 #include <drm/drm_crtc_helper.h>
 
-static int i915_modeset __read_mostly = -1;
-module_param_named(modeset, i915_modeset, int, 0400);
-MODULE_PARM_DESC(modeset,
-               "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
-               "1=on, -1=force vga console preference [default])");
-
-unsigned int i915_fbpercrtc __always_unused = 0;
-module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-
-int i915_panel_ignore_lid __read_mostly = 1;
-module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
-MODULE_PARM_DESC(panel_ignore_lid,
-               "Override lid status (0=autodetect, 1=autodetect disabled [default], "
-               "-1=force lid closed, -2=force lid open)");
-
-unsigned int i915_powersave __read_mostly = 1;
-module_param_named(powersave, i915_powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
-               "Enable powersavings, fbc, downclocking, etc. (default: true)");
-
-int i915_semaphores __read_mostly = -1;
-module_param_named(semaphores, i915_semaphores, int, 0400);
-MODULE_PARM_DESC(semaphores,
-               "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-
-int i915_enable_rc6 __read_mostly = -1;
-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
-MODULE_PARM_DESC(i915_enable_rc6,
-               "Enable power-saving render C-state 6. "
-               "Different stages can be selected via bitmask values "
-               "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
-               "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
-               "default: -1 (use per-chip default)");
-
-int i915_enable_fbc __read_mostly = -1;
-module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
-MODULE_PARM_DESC(i915_enable_fbc,
-               "Enable frame buffer compression for power savings "
-               "(default: -1 (use per-chip default))");
-
-unsigned int i915_lvds_downclock __read_mostly = 0;
-module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
-               "Use panel (LVDS/eDP) downclocking for power savings "
-               "(default: false)");
-
-int i915_lvds_channel_mode __read_mostly;
-module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
-MODULE_PARM_DESC(lvds_channel_mode,
-                "Specify LVDS channel mode "
-                "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-
-int i915_panel_use_ssc __read_mostly = -1;
-module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-MODULE_PARM_DESC(lvds_use_ssc,
-               "Use Spread Spectrum Clock with panels [LVDS/eDP] "
-               "(default: auto from VBT)");
-
-int i915_vbt_sdvo_panel_type __read_mostly = -1;
-module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
-MODULE_PARM_DESC(vbt_sdvo_panel_type,
-               "Override/Ignore selection of SDVO panel mode in the VBT "
-               "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-
-static bool i915_try_reset __read_mostly = true;
-module_param_named(reset, i915_try_reset, bool, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
-
-bool i915_enable_hangcheck __read_mostly = true;
-module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
-MODULE_PARM_DESC(enable_hangcheck,
-               "Periodically check GPU activity for detecting hangs. "
-               "WARNING: Disabling this can cause system wide hangs. "
-               "(default: true)");
-
-int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
-MODULE_PARM_DESC(i915_enable_ppgtt,
-               "Enable PPGTT (default: true)");
-
-int i915_enable_psr __read_mostly = 0;
-module_param_named(enable_psr, i915_enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
-
-unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
-module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
-MODULE_PARM_DESC(preliminary_hw_support,
-               "Enable preliminary hardware support.");
-
-int i915_disable_power_well __read_mostly = 1;
-module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
-MODULE_PARM_DESC(disable_power_well,
-                "Disable the power well when possible (default: true)");
-
-int i915_enable_ips __read_mostly = 1;
-module_param_named(enable_ips, i915_enable_ips, int, 0600);
-MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
-
-bool i915_fastboot __read_mostly = 0;
-module_param_named(fastboot, i915_fastboot, bool, 0600);
-MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
-                "(default: false)");
-
-int i915_enable_pc8 __read_mostly = 1;
-module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
-MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
-
-int i915_pc8_timeout __read_mostly = 5000;
-module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
-MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
-
-bool i915_prefault_disable __read_mostly;
-module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
-MODULE_PARM_DESC(prefault_disable,
-               "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
-
 static struct drm_driver driver;
 
+#define GEN_DEFAULT_PIPEOFFSETS \
+       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+                         PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+                          TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+       .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
+       .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
+       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
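
Editor's note: the offset tables above let display register macros look up a pipe's register-block displacement from intel_device_info instead of assuming a fixed stride, which is what makes the eDP transcoder's out-of-line block representable. A hedged sketch of the lookup; the helper name, base value, and offsets are illustrative only:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_TRANSCODERS 4

	struct device_info {
		uint32_t pipe_offsets[MAX_TRANSCODERS];
	};

	/* Hypothetical helper: the displacement of a pipe's register block is
	 * taken from the table, so eDP no longer needs a magic stride. */
	static uint32_t pipe_reg(const struct device_info *info, int pipe,
				 uint32_t reg_base)
	{
		return reg_base + info->pipe_offsets[pipe];
	}

	int main(void)
	{
		struct device_info info = {
			.pipe_offsets = { 0x0000, 0x1000, 0x2000, 0xf000 },
		};
		printf("0x%x\n", pipe_reg(&info, 3, 0x70008)); /* eDP block */
		return 0;
	}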
 static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_845g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i85x_info = {
@@ -174,18 +70,21 @@ static const struct intel_device_info intel_i85x_info = {
        .has_overlay = 1, .overlay_needs_physical = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i865g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
@@ -194,11 +93,13 @@ static const struct intel_device_info intel_i915gm_info = {
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
@@ -207,6 +108,7 @@ static const struct intel_device_info intel_i945gm_info = {
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i965g_info = {
@@ -214,6 +116,7 @@ static const struct intel_device_info intel_i965g_info = {
        .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
@@ -222,6 +125,7 @@ static const struct intel_device_info intel_i965gm_info = {
        .has_overlay = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_g33_info = {
@@ -229,12 +133,14 @@ static const struct intel_device_info intel_g33_info = {
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_gm45_info = {
@@ -243,18 +149,21 @@ static const struct intel_device_info intel_gm45_info = {
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
@@ -262,6 +171,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
@@ -270,6 +180,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
@@ -278,6 +189,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 #define GEN7_FEATURES  \
@@ -290,18 +202,21 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_valleyview_m_info = {
@@ -312,6 +227,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_valleyview_d_info = {
@@ -321,6 +237,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_haswell_d_info = {
@@ -329,6 +246,7 @@ static const struct intel_device_info intel_haswell_d_info = {
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
@@ -338,6 +256,7 @@ static const struct intel_device_info intel_haswell_m_info = {
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_broadwell_d_info = {
@@ -346,6 +265,8 @@ static const struct intel_device_info intel_broadwell_d_info = {
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
+       .has_fbc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
@@ -354,6 +275,8 @@ static const struct intel_device_info intel_broadwell_m_info = {
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
+       .has_fbc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
 };
 
 /*
@@ -475,14 +398,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6)
                return false;
 
+       if (i915.semaphores >= 0)
+               return i915.semaphores;
+
        /* Until we get further testing... */
-       if (IS_GEN8(dev)) {
-               WARN_ON(!i915_preliminary_hw_support);
+       if (IS_GEN8(dev))
                return false;
-       }
-
-       if (i915_semaphores >= 0)
-               return i915_semaphores;
 
 #ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
@@ -508,7 +429,7 @@ static int i915_drm_freeze(struct drm_device *dev)
        /* We do a lot of poking in a lot of registers, make sure they work
         * properly. */
        hsw_disable_package_c8(dev_priv);
-       intel_display_set_init_power(dev, true);
+       intel_display_set_init_power(dev_priv, true);
 
        drm_kms_helper_poll_disable(dev);
 
@@ -551,6 +472,8 @@ static int i915_drm_freeze(struct drm_device *dev)
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
        console_unlock();
 
+       dev_priv->suspend_count++;
+
        return 0;
 }
 
@@ -630,7 +553,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
                mutex_unlock(&dev->struct_mutex);
        }
 
-       intel_power_domains_init_hw(dev);
+       intel_power_domains_init_hw(dev_priv);
 
        i915_restore_state(dev);
        intel_opregion_setup(dev);
@@ -638,6 +561,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_init_pch_refclk(dev);
+               drm_mode_config_reset(dev);
 
                mutex_lock(&dev->struct_mutex);
 
@@ -650,7 +574,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
                intel_modeset_init_hw(dev);
 
                drm_modeset_lock_all(dev);
-               drm_mode_config_reset(dev);
                intel_modeset_setup_hw_state(dev, true);
                drm_modeset_unlock_all(dev);
 
@@ -747,7 +670,7 @@ int i915_reset(struct drm_device *dev)
        bool simulated;
        int ret;
 
-       if (!i915_try_reset)
+       if (!i915.reset)
                return 0;
 
        mutex_lock(&dev->struct_mutex);
@@ -802,6 +725,17 @@ int i915_reset(struct drm_device *dev)
 
                drm_irq_uninstall(dev);
                drm_irq_install(dev);
+
+               /* rps/rc6 re-init is necessary to restore state lost after the
+                * reset and the re-install of drm irq. Skip for ironlake per
+                * previous concerns that it doesn't respond well to some forms
+                * of re-init after reset. */
+               if (INTEL_INFO(dev)->gen > 5) {
+                       mutex_lock(&dev->struct_mutex);
+                       intel_enable_gt_powersave(dev);
+                       mutex_unlock(&dev->struct_mutex);
+               }
+
                intel_hpd_init(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
@@ -815,7 +749,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct intel_device_info *intel_info =
                (struct intel_device_info *) ent->driver_data;
 
-       if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+       if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
                DRM_INFO("This hardware requires preliminary hardware support.\n"
                         "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
                return -ENODEV;
@@ -910,6 +844,7 @@ static int i915_runtime_suspend(struct device *device)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        WARN_ON(!HAS_RUNTIME_PM(dev));
+       assert_force_wake_inactive(dev_priv);
 
        DRM_DEBUG_KMS("Suspending device\n");
 
@@ -1046,14 +981,14 @@ static int __init i915_init(void)
         * the default behavior.
         */
 #if defined(CONFIG_DRM_I915_KMS)
-       if (i915_modeset != 0)
+       if (i915.modeset != 0)
                driver.driver_features |= DRIVER_MODESET;
 #endif
-       if (i915_modeset == 1)
+       if (i915.modeset == 1)
                driver.driver_features |= DRIVER_MODESET;
 
 #ifdef CONFIG_VGA_CONSOLE
-       if (vgacon_text_force() && i915_modeset == -1)
+       if (vgacon_text_force() && i915.modeset == -1)
                driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
index df77e20e3c3d00ee9173d4c160274f0e837c26de..70fbe904016f30f1c23b2ca9d5c20ddaf5ca74e1 100644 (file)
@@ -58,7 +58,8 @@ enum pipe {
        PIPE_A = 0,
        PIPE_B,
        PIPE_C,
-       I915_MAX_PIPES
+       _PIPE_EDP,
+       I915_MAX_PIPES = _PIPE_EDP
 };
 #define pipe_name(p) ((p) + 'A')
 
@@ -66,7 +67,8 @@ enum transcoder {
        TRANSCODER_A = 0,
        TRANSCODER_B,
        TRANSCODER_C,
-       TRANSCODER_EDP = 0xF,
+       TRANSCODER_EDP,
+       I915_MAX_TRANSCODERS
 };
 #define transcoder_name(t) ((t) + 'A')
 
@@ -77,7 +79,7 @@ enum plane {
 };
 #define plane_name(p) ((p) + 'A')
 
-#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
 
 enum port {
        PORT_A = 0,
@@ -112,6 +114,17 @@ enum intel_display_power_domain {
        POWER_DOMAIN_TRANSCODER_B,
        POWER_DOMAIN_TRANSCODER_C,
        POWER_DOMAIN_TRANSCODER_EDP,
+       POWER_DOMAIN_PORT_DDI_A_2_LANES,
+       POWER_DOMAIN_PORT_DDI_A_4_LANES,
+       POWER_DOMAIN_PORT_DDI_B_2_LANES,
+       POWER_DOMAIN_PORT_DDI_B_4_LANES,
+       POWER_DOMAIN_PORT_DDI_C_2_LANES,
+       POWER_DOMAIN_PORT_DDI_C_4_LANES,
+       POWER_DOMAIN_PORT_DDI_D_2_LANES,
+       POWER_DOMAIN_PORT_DDI_D_4_LANES,
+       POWER_DOMAIN_PORT_DSI,
+       POWER_DOMAIN_PORT_CRT,
+       POWER_DOMAIN_PORT_OTHER,
        POWER_DOMAIN_VGA,
        POWER_DOMAIN_AUDIO,
        POWER_DOMAIN_INIT,
@@ -119,8 +132,6 @@ enum intel_display_power_domain {
        POWER_DOMAIN_NUM,
 };
 
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
                ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
@@ -128,14 +139,6 @@ enum intel_display_power_domain {
        ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
         (tran) + POWER_DOMAIN_TRANSCODER_A)
 
-#define HSW_ALWAYS_ON_POWER_DOMAINS (          \
-       BIT(POWER_DOMAIN_PIPE_A) |              \
-       BIT(POWER_DOMAIN_TRANSCODER_EDP))
-#define BDW_ALWAYS_ON_POWER_DOMAINS (          \
-       BIT(POWER_DOMAIN_PIPE_A) |              \
-       BIT(POWER_DOMAIN_TRANSCODER_EDP) |      \
-       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-
 enum hpd_pin {
        HPD_NONE = 0,
        HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
@@ -157,11 +160,16 @@ enum hpd_pin {
         I915_GEM_DOMAIN_VERTEX)
 
 #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
+#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
 
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
        list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
                if ((intel_encoder)->base.crtc == (__crtc))
 
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+       list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+               if ((intel_connector)->base.encoder == (__encoder))
+
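
Editor's note: the new iterator follows the same walk-and-filter idiom as for_each_encoder_on_crtc: visit every connector on the mode_config list and skip those not attached to the encoder of interest. A standalone model of that idiom over a plain array, with kernel list types deliberately omitted:

	#include <stdio.h>

	struct connector { int encoder_id; };

	/* Model of the filtered-iterator idiom: the for walks everything, the
	 * trailing if gates the caller's loop body to matching entries only. */
	#define for_each_connector_on_encoder(list, n, enc_id, c) \
		for (int _i = 0; _i < (n) && ((c) = &(list)[_i], 1); _i++) \
			if ((c)->encoder_id == (enc_id))

	int main(void)
	{
		struct connector all[] = { { 1 }, { 2 }, { 1 } };
		struct connector *c;
		int count = 0;

		for_each_connector_on_encoder(all, 3, 1, c)
			count++;
		printf("%d\n", count); /* 2: two connectors on encoder 1 */
		return 0;
	}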
 struct drm_i915_private;
 
 enum intel_dpll_id {
@@ -295,53 +303,87 @@ struct intel_display_error_state;
 
 struct drm_i915_error_state {
        struct kref ref;
+       struct timeval time;
+
+       char error_msg[128];
+       u32 reset_count;
+       u32 suspend_count;
+
+       /* Generic register state */
        u32 eir;
        u32 pgtbl_er;
        u32 ier;
        u32 ccid;
        u32 derrmr;
        u32 forcewake;
-       bool waiting[I915_NUM_RINGS];
-       u32 pipestat[I915_MAX_PIPES];
-       u32 tail[I915_NUM_RINGS];
-       u32 head[I915_NUM_RINGS];
-       u32 ctl[I915_NUM_RINGS];
-       u32 ipeir[I915_NUM_RINGS];
-       u32 ipehr[I915_NUM_RINGS];
-       u32 instdone[I915_NUM_RINGS];
-       u32 acthd[I915_NUM_RINGS];
-       u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-       u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-       u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
-       /* our own tracking of ring head and tail */
-       u32 cpu_ring_head[I915_NUM_RINGS];
-       u32 cpu_ring_tail[I915_NUM_RINGS];
        u32 error; /* gen6+ */
        u32 err_int; /* gen7 */
-       u32 bbstate[I915_NUM_RINGS];
-       u32 instpm[I915_NUM_RINGS];
-       u32 instps[I915_NUM_RINGS];
-       u32 extra_instdone[I915_NUM_INSTDONE_REG];
-       u32 seqno[I915_NUM_RINGS];
-       u64 bbaddr[I915_NUM_RINGS];
-       u32 fault_reg[I915_NUM_RINGS];
        u32 done_reg;
-       u32 faddr[I915_NUM_RINGS];
+       u32 gac_eco;
+       u32 gam_ecochk;
+       u32 gab_ctl;
+       u32 gfx_mode;
+       u32 extra_instdone[I915_NUM_INSTDONE_REG];
+       u32 pipestat[I915_MAX_PIPES];
        u64 fence[I915_MAX_NUM_FENCES];
-       struct timeval time;
+       struct intel_overlay_error_state *overlay;
+       struct intel_display_error_state *display;
+
        struct drm_i915_error_ring {
                bool valid;
+               /* Software tracked state */
+               bool waiting;
+               int hangcheck_score;
+               enum intel_ring_hangcheck_action hangcheck_action;
+               int num_requests;
+
+               /* our own tracking of ring head and tail */
+               u32 cpu_ring_head;
+               u32 cpu_ring_tail;
+
+               u32 semaphore_seqno[I915_NUM_RINGS - 1];
+
+               /* Register state */
+               u32 tail;
+               u32 head;
+               u32 ctl;
+               u32 hws;
+               u32 ipeir;
+               u32 ipehr;
+               u32 instdone;
+               u32 acthd;
+               u32 bbstate;
+               u32 instpm;
+               u32 instps;
+               u32 seqno;
+               u64 bbaddr;
+               u32 fault_reg;
+               u32 faddr;
+               u32 rc_psmi; /* sleep state */
+               u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+
                struct drm_i915_error_object {
                        int page_count;
                        u32 gtt_offset;
                        u32 *pages[0];
-               } *ringbuffer, *batchbuffer, *ctx;
+               } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+
                struct drm_i915_error_request {
                        long jiffies;
                        u32 seqno;
                        u32 tail;
                } *requests;
-               int num_requests;
+
+               struct {
+                       u32 gfx_mode;
+                       union {
+                               u64 pdp[4];
+                               u32 pp_dir_base;
+                       };
+               } vm_info;
+
+               pid_t pid;
+               char comm[TASK_COMM_LEN];
        } ring[I915_NUM_RINGS];
        struct drm_i915_error_buffer {
                u32 size;
@@ -358,15 +400,13 @@ struct drm_i915_error_state {
                s32 ring:4;
                u32 cache_level:3;
        } **active_bo, **pinned_bo;
+
        u32 *active_bo_count, *pinned_bo_count;
-       struct intel_overlay_error_state *overlay;
-       struct intel_display_error_state *display;
-       int hangcheck_score[I915_NUM_RINGS];
-       enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
 struct intel_connector;
 struct intel_crtc_config;
+struct intel_plane_config;
 struct intel_crtc;
 struct intel_limit;
 struct dpll;
@@ -405,6 +445,8 @@ struct drm_i915_display_funcs {
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                struct intel_crtc_config *);
+       void (*get_plane_config)(struct intel_crtc *,
+                                struct intel_plane_config *);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
                             int x, int y,
                             struct drm_framebuffer *old_fb);
@@ -469,7 +511,7 @@ struct intel_uncore {
        unsigned fw_rendercount;
        unsigned fw_mediacount;
 
-       struct delayed_work force_wake_work;
+       struct timer_list force_wake_timer;
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -504,9 +546,16 @@ struct intel_uncore {
 struct intel_device_info {
        u32 display_mmio_offset;
        u8 num_pipes:3;
+       u8 num_sprites[I915_MAX_PIPES];
        u8 gen;
        u8 ring_mask; /* Rings supported by the HW */
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+       /* Register offsets for the various display pipes and transcoders */
+       int pipe_offsets[I915_MAX_TRANSCODERS];
+       int trans_offsets[I915_MAX_TRANSCODERS];
+       int dpll_offsets[I915_MAX_PIPES];
+       int dpll_md_offsets[I915_MAX_PIPES];
+       int palette_offsets[I915_MAX_PIPES];
 };
 
 #undef DEFINE_FLAG
@@ -524,6 +573,57 @@ enum i915_cache_level {
 
 typedef uint32_t gen6_gtt_pte_t;
 
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+       struct drm_mm_node node;
+       struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
+
+       /** This object's place on the active/inactive lists */
+       struct list_head mm_list;
+
+       struct list_head vma_link; /* Link in the object's VMA list */
+
+       /** This vma's place in the batchbuffer or on the eviction list */
+       struct list_head exec_list;
+
+       /**
+        * Used for performing relocations during execbuffer insertion.
+        */
+       struct hlist_node exec_node;
+       unsigned long exec_handle;
+       struct drm_i915_gem_exec_object2 *exec_entry;
+
+       /**
+        * How many users have pinned this object in GTT space. The following
+        * users can each hold at most one reference: pwrite/pread, pin_ioctl
+        * (via user_pin_count), execbuffer (objects are not allowed multiple
+        * times for the same batchbuffer), and the framebuffer code. When
+        * switching/pageflipping, the framebuffer code has at most two buffers
+        * pinned per crtc.
+        *
+        * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+        * bits with absolutely no headroom. So use 4 bits. */
+       unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+       /** Unmap an object from an address space. This usually consists of
+        * setting the valid PTE entries to a reserved scratch page. */
+       void (*unbind_vma)(struct i915_vma *vma);
+       /* Map an object into an address space with the given cache flags. */
+#define GLOBAL_BIND (1<<0)
+       void (*bind_vma)(struct i915_vma *vma,
+                        enum i915_cache_level cache_level,
+                        u32 flags);
+};
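
Editor's note: pin_count moving from the object into the VMA means pinning is now tracked per address space. A standalone sketch of the bookkeeping the 4-bit field implies, with illustrative helper names (the driver's real pin path also binds the VMA):

	#include <stdio.h>

	#define MAX_PIN_COUNT 0xf	/* 4-bit bitfield, as in i915_vma */

	struct vma { unsigned int pin_count : 4; };

	/* Model of vma pin/unpin: refuse to wrap the bitfield, and treat an
	 * unpin without a matching pin as a caller bug. */
	static int vma_pin(struct vma *v)
	{
		if (v->pin_count == MAX_PIN_COUNT)
			return -1;	/* would overflow: reject */
		v->pin_count++;
		return 0;
	}

	static void vma_unpin(struct vma *v)
	{
		if (v->pin_count == 0) {
			fprintf(stderr, "unbalanced unpin\n");
			return;
		}
		v->pin_count--;
	}

	int main(void)
	{
		struct vma v = { 0 };
		vma_pin(&v);
		vma_pin(&v);
		vma_unpin(&v);
		printf("%u\n", v.pin_count); /* 1 */
		return 0;
	}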
+
 struct i915_address_space {
        struct drm_mm mm;
        struct drm_device *dev;
@@ -564,12 +664,12 @@ struct i915_address_space {
                                     enum i915_cache_level level,
                                     bool valid); /* Create a valid PTE */
        void (*clear_range)(struct i915_address_space *vm,
-                           unsigned int first_entry,
-                           unsigned int num_entries,
+                           uint64_t start,
+                           uint64_t length,
                            bool use_scratch);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct sg_table *st,
-                              unsigned int first_entry,
+                              uint64_t start,
                               enum i915_cache_level cache_level);
        void (*cleanup)(struct i915_address_space *vm);
 };
@@ -603,55 +703,32 @@ struct i915_gtt {
 };
 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
+#define GEN8_LEGACY_PDPS 4
 struct i915_hw_ppgtt {
        struct i915_address_space base;
+       struct kref ref;
+       struct drm_mm_node node;
        unsigned num_pd_entries;
+       unsigned num_pd_pages; /* gen8+ */
        union {
                struct page **pt_pages;
-               struct page *gen8_pt_pages;
+               struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
        };
        struct page *pd_pages;
-       int num_pd_pages;
-       int num_pt_pages;
        union {
                uint32_t pd_offset;
-               dma_addr_t pd_dma_addr[4];
+               dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
        };
        union {
                dma_addr_t *pt_dma_addr;
                dma_addr_t *gen8_pt_dma_addr[4];
        };
-       int (*enable)(struct drm_device *dev);
-};
-
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
-       struct drm_mm_node node;
-       struct drm_i915_gem_object *obj;
-       struct i915_address_space *vm;
-
-       /** This object's place on the active/inactive lists */
-       struct list_head mm_list;
-
-       struct list_head vma_link; /* Link in the object's VMA list */
-
-       /** This vma's place in the batchbuffer or on the eviction list */
-       struct list_head exec_list;
-
-       /**
-        * Used for performing relocations during execbuffer insertion.
-        */
-       struct hlist_node exec_node;
-       unsigned long exec_handle;
-       struct drm_i915_gem_exec_object2 *exec_entry;
 
+       int (*enable)(struct i915_hw_ppgtt *ppgtt);
+       int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+                        struct intel_ring_buffer *ring,
+                        bool synchronous);
+       void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
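
Editor's note: with GEN8_LEGACY_PDPS = 4 page-directory pointers, a legacy gen8 PPGTT spans 4 GiB: each PDP covers 1 GiB through a page directory of 512 entries, each pointing at a page table of 512 4 KiB pages. A standalone sketch of how a GPU virtual address splits into those indices; the field widths follow that layout, and this is an illustration rather than driver code:

	#include <stdint.h>
	#include <stdio.h>

	#define GEN8_LEGACY_PDPS 4

	/* Split a gen8 legacy PPGTT address: 12-bit page offset, then 9 bits
	 * of PTE index, 9 bits of PDE index, and 2 bits selecting the PDP. */
	static void decode(uint64_t addr)
	{
		unsigned pte  = (addr >> 12) & 0x1ff;
		unsigned pde  = (addr >> 21) & 0x1ff;
		unsigned pdpe = (addr >> 30) & (GEN8_LEGACY_PDPS - 1);

		printf("pdpe=%u pde=%u pte=%u\n", pdpe, pde, pte);
	}

	int main(void)
	{
		decode(0x40200000ull);	/* 1 GiB + 2 MiB: pdpe=1 pde=1 pte=0 */
		return 0;
	}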
 
 struct i915_ctx_hang_stats {
@@ -676,9 +753,10 @@ struct i915_hw_context {
        bool is_initialized;
        uint8_t remap_slice;
        struct drm_i915_file_private *file_priv;
-       struct intel_ring_buffer *ring;
+       struct intel_ring_buffer *last_ring;
        struct drm_i915_gem_object *obj;
        struct i915_ctx_hang_stats hang_stats;
+       struct i915_address_space *vm;
 
        struct list_head link;
 };
@@ -831,11 +909,7 @@ struct i915_suspend_saved_registers {
        u32 savePFIT_CONTROL;
        u32 save_palette_a[256];
        u32 save_palette_b[256];
-       u32 saveDPFC_CB_BASE;
-       u32 saveFBC_CFB_BASE;
-       u32 saveFBC_LL_BASE;
        u32 saveFBC_CONTROL;
-       u32 saveFBC_CONTROL2;
        u32 saveIER;
        u32 saveIIR;
        u32 saveIMR;
@@ -905,8 +979,6 @@ struct intel_gen6_power_mgmt {
        struct work_struct work;
        u32 pm_iir;
 
-       /* The below variables an all the rps hw state are protected by
-        * dev->struct mutext. */
        u8 cur_delay;
        u8 min_delay;
        u8 max_delay;
@@ -915,6 +987,9 @@ struct intel_gen6_power_mgmt {
        u8 rp0_delay;
        u8 hw_max;
 
+       bool rp_up_masked;
+       bool rp_down_masked;
+
        int last_adj;
        enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
@@ -953,6 +1028,36 @@ struct intel_ilk_power_mgmt {
        struct drm_i915_gem_object *renderctx;
 };
 
+struct drm_i915_private;
+struct i915_power_well;
+
+struct i915_power_well_ops {
+       /*
+        * Synchronize the well's hw state to match the current sw state: for
+        * example, enable or disable the well based on the current refcount.
+        * Called during driver init and resume, possibly after first calling
+        * the enable/disable handlers.
+        */
+       void (*sync_hw)(struct drm_i915_private *dev_priv,
+                       struct i915_power_well *power_well);
+       /*
+        * Enable the well and resources that depend on it (for example
+        * interrupts located on the well). Called after the 0->1 refcount
+        * transition.
+        */
+       void (*enable)(struct drm_i915_private *dev_priv,
+                      struct i915_power_well *power_well);
+       /*
+        * Disable the well and resources that depend on it. Called after
+        * the 1->0 refcount transition.
+        */
+       void (*disable)(struct drm_i915_private *dev_priv,
+                       struct i915_power_well *power_well);
+       /* Returns the hw enabled state. */
+       bool (*is_enabled)(struct drm_i915_private *dev_priv,
+                          struct i915_power_well *power_well);
+};
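
Editor's note: the ops vtable separates refcounting from the hardware specifics, so only the 0->1 and 1->0 count transitions reach the enable/disable handlers. A standalone sketch of that get/put pattern; the helper names and hsw_* handlers are illustrative:

	#include <stdio.h>

	struct power_well;

	struct power_well_ops {
		void (*enable)(struct power_well *w);
		void (*disable)(struct power_well *w);
	};

	struct power_well {
		const char *name;
		int count;			/* usage count */
		const struct power_well_ops *ops;
	};

	/* Only the first get and the last put reach the hw-specific handlers. */
	static void power_well_get(struct power_well *w)
	{
		if (w->count++ == 0)
			w->ops->enable(w);
	}

	static void power_well_put(struct power_well *w)
	{
		if (--w->count == 0)
			w->ops->disable(w);
	}

	static void hsw_enable(struct power_well *w)  { printf("%s on\n", w->name); }
	static void hsw_disable(struct power_well *w) { printf("%s off\n", w->name); }

	static const struct power_well_ops hsw_ops = { hsw_enable, hsw_disable };

	int main(void)
	{
		struct power_well w = { "display", 0, &hsw_ops };
		power_well_get(&w);	/* "display on" */
		power_well_get(&w);	/* no hw access */
		power_well_put(&w);	/* no hw access */
		power_well_put(&w);	/* "display off" */
		return 0;
	}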
+
 /* Power well structure for haswell */
 struct i915_power_well {
        const char *name;
@@ -960,11 +1065,8 @@ struct i915_power_well {
        /* power well enable/disable usage count */
        int count;
        unsigned long domains;
-       void *data;
-       void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
-                   bool enable);
-       bool (*is_enabled)(struct drm_device *dev,
-                          struct i915_power_well *power_well);
+       unsigned long data;
+       const struct i915_power_well_ops *ops;
 };
 
 struct i915_power_domains {
@@ -1061,6 +1163,14 @@ struct i915_gem_mm {
         */
        bool interruptible;
 
+       /**
+        * Is the GPU currently considered idle, or busy executing userspace
+        * requests?  Whilst idle, we attempt to power down the hardware and
+        * display clocks. In order to reduce the effect on performance, there
+        * is a slight delay before we do so.
+        */
+       bool busy;
+
        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
        /** Bit 6 swizzling required for Y tiling */
@@ -1250,11 +1360,10 @@ struct ilk_wm_values {
  * Ideally every piece of our code that needs PC8+ disabled would call
  * hsw_disable_package_c8, which would increment disable_count and prevent the
  * system from reaching PC8+. But we don't have a symmetric way to do this for
- * everything, so we have the requirements_met and gpu_idle variables. When we
- * switch requirements_met or gpu_idle to true we decrease disable_count, and
- * increase it in the opposite case. The requirements_met variable is true when
- * all the CRTCs, encoders and the power well are disabled. The gpu_idle
- * variable is true when the GPU is idle.
+ * everything, so we have the requirements_met variable. When we switch
+ * requirements_met to true we decrease disable_count, and increase it in the
+ * opposite case. The requirements_met variable is true when all the CRTCs,
+ * encoders and the power well are disabled.
  *
  * In addition to everything, we only actually enable PC8+ if disable_count
  * stays at zero for at least some seconds. This is implemented with the
@@ -1277,7 +1386,6 @@ struct ilk_wm_values {
  */
 struct i915_package_c8 {
        bool requirements_met;
-       bool gpu_idle;
        bool irqs_disabled;
        /* Only true after the delayed work task actually enables it. */
        bool enabled;
@@ -1332,7 +1440,7 @@ typedef struct drm_i915_private {
        struct drm_device *dev;
        struct kmem_cache *slab;
 
-       const struct intel_device_info *info;
+       const struct intel_device_info info;
 
        int relative_constants_mode;
 
@@ -1361,11 +1469,11 @@ typedef struct drm_i915_private {
        drm_dma_handle_t *status_page_dmah;
        struct resource mch_res;
 
-       atomic_t irq_received;
-
        /* protects the irq masks */
        spinlock_t irq_lock;
 
+       bool display_irqs_enabled;
+
        /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
        struct pm_qos_request pm_qos;
 
@@ -1379,6 +1487,7 @@ typedef struct drm_i915_private {
        };
        u32 gt_irq_mask;
        u32 pm_irq_mask;
+       u32 pipestat_irq_mask[I915_MAX_PIPES];
 
        struct work_struct hotplug_work;
        bool enable_hotplug_processing;
@@ -1394,8 +1503,6 @@ typedef struct drm_i915_private {
        u32 hpd_event_bits;
        struct timer_list hotplug_reenable_timer;
 
-       int num_plane;
-
        struct i915_fbc fbc;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;
@@ -1445,8 +1552,8 @@ typedef struct drm_i915_private {
 
        struct sdvo_device_mapping sdvo_mappings[2];
 
-       struct drm_crtc *plane_to_crtc_mapping[3];
-       struct drm_crtc *pipe_to_crtc_mapping[3];
+       struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+       struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
        wait_queue_head_t pending_flip_queue;
 
 #ifdef CONFIG_DEBUG_FS
@@ -1506,6 +1613,7 @@ typedef struct drm_i915_private {
 
        u32 fdi_rx_config;
 
+       u32 suspend_count;
        struct i915_suspend_saved_registers regfile;
 
        struct {
@@ -1627,18 +1735,6 @@ struct drm_i915_gem_object {
         */
        unsigned int fence_dirty:1;
 
-       /** How many users have pinned this object in GTT space. The following
-        * users can each hold at most one reference: pwrite/pread, pin_ioctl
-        * (via user_pin_count), execbuffer (objects are not allowed multiple
-        * times for the same batchbuffer), and the framebuffer code. When
-        * switching/pageflipping, the framebuffer code has at most two buffers
-        * pinned per crtc.
-        *
-        * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-        * bits with absolutely no headroom. So use 4 bits. */
-       unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
        /**
         * Is the object at the current location in the gtt mappable and
         * fenceable? Used to avoid costly recalculations.
@@ -1697,7 +1793,6 @@ struct drm_i915_gem_object {
        /** for phy allocated objects */
        struct drm_i915_gem_phys_object *phys_obj;
 };
-#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
@@ -1743,6 +1838,7 @@ struct drm_i915_gem_request {
 
 struct drm_i915_file_private {
        struct drm_i915_private *dev_priv;
+       struct drm_file *file;
 
        struct {
                spinlock_t lock;
@@ -1751,11 +1847,95 @@ struct drm_i915_file_private {
        } mm;
        struct idr context_idr;
 
-       struct i915_ctx_hang_stats hang_stats;
+       struct i915_hw_context *private_default_ctx;
        atomic_t rps_wait_boost;
 };
 
-#define INTEL_INFO(dev)        (to_i915(dev)->info)
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+       /*
+        * Flags describing how the command parser processes the command.
+        *
+        * CMD_DESC_FIXED: The command has a fixed length if this is set,
+        *                 a length mask if not set
+        * CMD_DESC_SKIP: The command is allowed but does not follow the
+        *                standard length encoding for the opcode range in
+        *                which it falls
+        * CMD_DESC_REJECT: The command is never allowed
+        * CMD_DESC_REGISTER: The command should be checked against the
+        *                    register whitelist for the appropriate ring
+        * CMD_DESC_BITMASK: The command is only allowed if masked dwords in
+        *                   it match the expected values (see bits[] below)
+        * CMD_DESC_MASTER: The command is allowed if the submitting process
+        *                  is the DRM master
+        */
+       u32 flags;
+#define CMD_DESC_FIXED    (1<<0)
+#define CMD_DESC_SKIP     (1<<1)
+#define CMD_DESC_REJECT   (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK  (1<<4)
+#define CMD_DESC_MASTER   (1<<5)
+
+       /*
+        * The command's unique identification bits and the bitmask to get them.
+        * This isn't strictly the opcode field as defined in the spec and may
+        * also include type, subtype, and/or subop fields.
+        */
+       struct {
+               u32 value;
+               u32 mask;
+       } cmd;
+
+       /*
+        * The command's length. If CMD_DESC_FIXED is set, the command has a
+        * fixed length and does not include a length field; otherwise, the
+        * length is extracted from the command via the length field mask.
+        * All command entries in a command table must include length
+        * information.
+        */
+       union {
+               u32 fixed;
+               u32 mask;
+       } length;
+
+       /*
+        * Describes where to find a register address in the command to check
+        * against the ring's register whitelist. Only valid if flags has the
+        * CMD_DESC_REGISTER bit set.
+        */
+       struct {
+               u32 offset;
+               u32 mask;
+       } reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+       /*
+        * Describes command checks where a particular dword is masked and
+        * compared against an expected value. If the command does not match
+        * the expected value, the parser rejects it. Only valid if flags has
+        * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+        * are valid.
+        */
+       struct {
+               u32 offset;
+               u32 mask;
+               u32 expected;
+       } bits[MAX_CMD_DESC_BITMASKS];
+};
+
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each ring has an array of tables. Each table consists of an array of command
+ * descriptors, which must be sorted with command opcodes in ascending order.
+ */
+struct drm_i915_cmd_table {
+       const struct drm_i915_cmd_descriptor *table;
+       int count;
+};
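
Editor's note: because each table is sorted by ascending opcode, a lookup can bisect on the command header. A standalone sketch of such a search; the descriptor is trimmed to the id fields, and find_cmd is an illustrative name rather than the parser's API:

	#include <stdint.h>
	#include <stdio.h>

	struct cmd_descriptor {
		uint32_t value;		/* canonical opcode bits */
		uint32_t mask;		/* which bits identify the command */
	};

	/* Bisect a table sorted by ascending opcode value; compare the command
	 * header under each entry's own mask, as different opcode ranges use
	 * different identification bits. */
	static const struct cmd_descriptor *
	find_cmd(const struct cmd_descriptor *table, int count, uint32_t header)
	{
		int lo = 0, hi = count - 1;

		while (lo <= hi) {
			int mid = lo + (hi - lo) / 2;
			uint32_t masked = header & table[mid].mask;

			if (masked == table[mid].value)
				return &table[mid];
			if (masked < table[mid].value)
				hi = mid - 1;
			else
				lo = mid + 1;
		}
		return NULL;	/* unknown command: the parser would reject it */
	}

	int main(void)
	{
		static const struct cmd_descriptor table[] = {
			{ 0x02000000, 0xff000000 },
			{ 0x0a000000, 0xff000000 },
			{ 0x13000000, 0xff000000 },
		};
		printf("%s\n", find_cmd(table, 3, 0x0a0000ff) ? "hit" : "miss");
		return 0;
	}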
+
+#define INTEL_INFO(dev)        (&to_i915(dev)->info)
 
 #define IS_I830(dev)           ((dev)->pdev->device == 0x3577)
 #define IS_845G(dev)           ((dev)->pdev->device == 0x2562)
@@ -1824,7 +2004,11 @@ struct drm_i915_file_private {
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev)        (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
+#define HAS_ALIASING_PPGTT(dev)        (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
+#define HAS_PPGTT(dev)         (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
+                                && !IS_BROADWELL(dev))
+#define USES_PPGTT(dev)                intel_enable_ppgtt(dev, false)
+#define USES_FULL_PPGTT(dev)   intel_enable_ppgtt(dev, true)
 
 #define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)    (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1887,32 +2071,42 @@ struct drm_i915_file_private {
 
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc __always_unused;
-extern int i915_panel_ignore_lid __read_mostly;
-extern unsigned int i915_powersave __read_mostly;
-extern int i915_semaphores __read_mostly;
-extern unsigned int i915_lvds_downclock __read_mostly;
-extern int i915_lvds_channel_mode __read_mostly;
-extern int i915_panel_use_ssc __read_mostly;
-extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern int i915_enable_rc6 __read_mostly;
-extern int i915_enable_fbc __read_mostly;
-extern bool i915_enable_hangcheck __read_mostly;
-extern int i915_enable_ppgtt __read_mostly;
-extern int i915_enable_psr __read_mostly;
-extern unsigned int i915_preliminary_hw_support __read_mostly;
-extern int i915_disable_power_well __read_mostly;
-extern int i915_enable_ips __read_mostly;
-extern bool i915_fastboot __read_mostly;
-extern int i915_enable_pc8 __read_mostly;
-extern int i915_pc8_timeout __read_mostly;
-extern bool i915_prefault_disable __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
+/* i915_params.c */
+struct i915_params {
+       int modeset;
+       int panel_ignore_lid;
+       unsigned int powersave;
+       int semaphores;
+       unsigned int lvds_downclock;
+       int lvds_channel_mode;
+       int panel_use_ssc;
+       int vbt_sdvo_panel_type;
+       int enable_rc6;
+       int enable_fbc;
+       int enable_ppgtt;
+       int enable_psr;
+       unsigned int preliminary_hw_support;
+       int disable_power_well;
+       int enable_ips;
+       int enable_pc8;
+       int pc8_timeout;
+       int invert_brightness;
+       int enable_cmd_parser;
+       /* leave bools at the end to not create holes */
+       bool enable_hangcheck;
+       bool fastboot;
+       bool prefault_disable;
+       bool reset;
+       bool disable_display;
+};
+extern struct i915_params i915 __read_mostly;
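
Editor's note: on the definition side (i915_params.c), each field gets its default in a single initializer and is bound to a module parameter, so the rest of the driver reads i915.<field>. A hedged sketch for two of the fields, using the defaults documented in the removed definitions above and assuming kernel-module context:

	#include <linux/module.h>
	#include <linux/moduleparam.h>
	#include "i915_drv.h"	/* struct i915_params declaration, as above */

	struct i915_params i915 __read_mostly = {
		.modeset = -1,
		.reset = true,
	};

	module_param_named(modeset, i915.modeset, int, 0400);
	MODULE_PARM_DESC(modeset,
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
		"1=on, -1=force vga console preference [default])");

	module_param_named(reset, i915.reset, bool, 0600);
	MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");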
+
                                /* i915_dma.c */
 void i915_update_dri1_breadcrumb(struct drm_device *dev);
 extern void i915_kernel_lost_context(struct drm_device * dev);
@@ -1943,8 +2137,12 @@ extern void intel_console_resume(struct work_struct *work);
 
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
-void i915_handle_error(struct drm_device *dev, bool wedged);
+__printf(3, 4)
+void i915_handle_error(struct drm_device *dev, bool wedged,
+                      const char *fmt, ...);
 
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
+                                                       int new_delay);
 extern void intel_irq_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
 
@@ -1955,10 +2153,15 @@ extern void intel_uncore_check_errors(struct drm_device *dev);
 extern void intel_uncore_fini(struct drm_device *dev);
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe,
+                    u32 status_mask);
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe,
+                     u32 status_mask);
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
 
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -2014,22 +2217,27 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
+void i915_init_vm(struct drm_i915_private *dev_priv,
+                 struct i915_address_space *vm);
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
+#define PIN_MAPPABLE 0x1
+#define PIN_NONBLOCK 0x2
+#define PIN_GLOBAL 0x4
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm,
                                     uint32_t alignment,
-                                    bool map_and_fenceable,
-                                    bool nonblocking);
-void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+                                    unsigned flags);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
-int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+                                   int *needs_clflush);
+
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
@@ -2096,8 +2304,10 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
        }
 }
 
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_ring_buffer *ring);
+
 bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
@@ -2186,6 +2396,13 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
                                  struct i915_address_space *vm);
 
 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
+       struct i915_vma *vma;
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               if (vma->pin_count > 0)
+                       return true;
+       return false;
+}
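Pin accounting moves from the object (obj->pin_count) to each VMA (vma->pin_count), so the inline above answers "is any mapping of this object pinned?". Together with the new PIN_* flags this replaces the old pair of anonymous booleans; a hedged before/after sketch of a call site:

	/* before: map_and_fenceable = true, nonblocking = false */
	ret = i915_gem_object_pin(obj, vm, alignment, true, false);

	/* after: self-describing flags; the VMA's pin_count is bumped */
	ret = i915_gem_object_pin(obj, vm, alignment,
				  PIN_MAPPABLE | PIN_GLOBAL);

	/* eviction and unbind paths ask the object-level helper */
	if (i915_gem_obj_is_pinned(obj))
		return -EBUSY;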
 
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
@@ -2217,54 +2434,69 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
                      uint32_t alignment,
-                     bool map_and_fenceable,
-                     bool nonblocking)
+                     unsigned flags)
+{
+       return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
+}
+
+static inline int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
-                                  map_and_fenceable, nonblocking);
+       return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
 }
 
+void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
+
 /* i915_gem_context.c */
+#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_reset(struct drm_device *dev);
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_context_enable(struct drm_i915_private *dev_priv);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct intel_ring_buffer *ring,
-                       struct drm_file *file, int to_id);
+                       struct drm_file *file, struct i915_hw_context *to);
+struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
 static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
 {
-       kref_get(&ctx->ref);
+       if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+               kref_get(&ctx->ref);
 }
 
 static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
 {
-       kref_put(&ctx->ref, i915_gem_context_free);
+       if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+               kref_put(&ctx->ref, i915_gem_context_free);
+}
+
+static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
+{
+       return c->id == DEFAULT_CONTEXT_ID;
 }
 
-struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct drm_device *dev,
-                               struct drm_file *file,
-                               u32 id);
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file);
 
-/* i915_gem_gtt.c */
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-                           struct drm_i915_gem_object *obj,
-                           enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-                             struct drm_i915_gem_object *obj);
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+                                         struct i915_address_space *vm,
+                                         int min_size,
+                                         unsigned alignment,
+                                         unsigned cache_level,
+                                         unsigned flags);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_everything(struct drm_device *dev);
 
+/* i915_gem_gtt.c */
 void i915_check_and_clear_faults(struct drm_device *dev);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-                               enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
@@ -2275,18 +2507,8 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6)
                intel_gtt_chipset_flush();
 }
-
-
-/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
-                                         struct i915_address_space *vm,
-                                         int min_size,
-                                         unsigned alignment,
-                                         unsigned cache_level,
-                                         bool mappable,
-                                         bool nonblock);
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
-int i915_gem_evict_everything(struct drm_device *dev);
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+bool intel_enable_ppgtt(struct drm_device *dev, bool full);
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
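i915_switch_context() now takes a resolved struct i915_hw_context * rather than a numeric id, with i915_gem_context_get() exported for the lookup. The i915_gpu_idle() hunk further down shows the conversion; a minimal before/after:

	/* before: pass an id and let the core resolve it */
	ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);

	/* after: pass the context object itself */
	ret = i915_switch_context(ring, NULL, ring->default_context);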
@@ -2343,7 +2565,8 @@ static inline void i915_error_state_buf_release(
 {
        kfree(eb->buf);
 }
-void i915_capture_error_state(struct drm_device *dev);
+void i915_capture_error_state(struct drm_device *dev, bool wedge,
+                             const char *error_msg);
 void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv);
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
@@ -2352,6 +2575,14 @@ void i915_destroy_error_state(struct drm_device *dev);
 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
 const char *i915_cache_level_str(int type);
 
+/* i915_cmd_parser.c */
+void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
+bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
+int i915_parse_cmds(struct intel_ring_buffer *ring,
+                   struct drm_i915_gem_object *batch_obj,
+                   u32 batch_start_offset,
+                   bool is_master);
+
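The command parser hooks in at batch submission: rings that need software scanning (per i915_needs_cmd_parser()) have their batches vetted before execution. A hedged sketch of the execbuffer-side usage; the exact call site and error handling are illustrative:

	if (i915_needs_cmd_parser(ring)) {
		ret = i915_parse_cmds(ring, batch_obj,
				      args->batch_start_offset,
				      file->is_master);
		if (ret)
			return ret;	/* batch contains a rejected command */
	}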
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
@@ -2425,10 +2656,12 @@ extern void intel_modeset_suspend_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
+extern void intel_connector_unregister(struct intel_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         bool force_restore);
 extern void i915_redisable_vga(struct drm_device *dev);
+extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -2463,6 +2696,7 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
  */
 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2566,4 +2800,31 @@ timespec_to_jiffies_timeout(const struct timespec *value)
        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
 }
 
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, record the timestamp (jiffies) of
+ * when event A happened. Then, just before event B, call this function,
+ * passing that timestamp as the first argument and X as the second.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+       unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+       /*
+        * Don't re-read the value of "jiffies" every time since it may change
+        * behind our back and break the math.
+        */
+       tmp_jiffies = jiffies;
+       target_jiffies = timestamp_jiffies +
+                        msecs_to_jiffies_timeout(to_wait_ms);
+
+       if (time_after(target_jiffies, tmp_jiffies)) {
+               remaining_jiffies = target_jiffies - tmp_jiffies;
+               while (remaining_jiffies)
+                       remaining_jiffies =
+                           schedule_timeout_uninterruptible(remaining_jiffies);
+       }
+}
+
 #endif
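A hedged usage sketch of the helper above, modeled on panel power sequencing; the names and the 500 ms figure are illustrative:

	static unsigned long example_panel_off_jiffies;

	static void example_panel_off(void)
	{
		/* event A: record when the panel went off */
		example_panel_off_jiffies = jiffies;
	}

	static void example_panel_on(void)
	{
		/* event B: sleep out whatever remains of a 500 ms window */
		wait_remaining_ms_from_jiffies(example_panel_off_jiffies, 500);
	}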
index 00c8361547253ecccf8ba17d6c06d730c688ca3b..92b0b4164b1daf69ce43ba075a9cdb52a13f3e8d 100644 (file)
@@ -43,12 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly);
-static __must_check int
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
-                          struct i915_address_space *vm,
-                          unsigned alignment,
-                          bool map_and_fenceable,
-                          bool nonblocking);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
@@ -67,6 +61,7 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
@@ -204,7 +199,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-               if (obj->pin_count)
+               if (i915_gem_obj_is_pinned(obj))
                        pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);
 
@@ -332,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
        return 0;
 }
 
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+                                   int *needs_clflush)
+{
+       int ret;
+
+       *needs_clflush = 0;
+
+       if (!obj->base.filp)
+               return -EINVAL;
+
+       if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+               /* If we're not in the CPU read domain, set ourselves into
+                * the GTT read domain and manually flush cachelines (if
+                * required). This optimizes for the case when the GPU will
+                * dirty the data anyway again before the next pread happens. */
+               *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+                                                       obj->cache_level);
+               ret = i915_gem_object_wait_rendering(obj, true);
+               if (ret)
+                       return ret;
+       }
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret)
+               return ret;
+
+       i915_gem_object_pin_pages(obj);
+
+       return ret;
+}
+
 /* Per-page copy function for the shmem pread fastpath.
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
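i915_gem_obj_prepare_shmem_read() above is the setup formerly open-coded in the pread path (see the i915_gem_shmem_pread hunk below); factoring it out lets the new command parser reuse it. A hedged caller sketch; note the helper leaves the pages pinned for the caller to drop:

	int needs_clflush = 0;
	int ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	/* ... read the object's backing pages, flushing cachelines
	 * first whenever needs_clflush is non-zero ... */

	i915_gem_object_unpin_pages(obj);	/* undo the helper's pin */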
@@ -429,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-               /* If we're not in the cpu read domain, set ourself into the gtt
-                * read domain and manually flush cachelines (if required). This
-                * optimizes for the case when the gpu will dirty the data
-                * anyway again before the next pread happens. */
-               needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-               ret = i915_gem_object_wait_rendering(obj, true);
-               if (ret)
-                       return ret;
-       }
-
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
        if (ret)
                return ret;
 
-       i915_gem_object_pin_pages(obj);
-
        offset = args->offset;
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
@@ -476,7 +494,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
                mutex_unlock(&dev->struct_mutex);
 
-               if (likely(!i915_prefault_disable) && !prefaulted) {
+               if (likely(!i915.prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
@@ -492,12 +510,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
                mutex_lock(&dev->struct_mutex);
 
-next_page:
-               mark_page_accessed(page);
-
                if (ret)
                        goto out;
 
+next_page:
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
@@ -605,7 +621,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        char __user *user_data;
        int page_offset, page_length, ret;
 
-       ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
+       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
        if (ret)
                goto out;
 
@@ -651,7 +667,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        }
 
 out_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
 out:
        return ret;
 }
@@ -677,9 +693,8 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
-       ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
-                                               user_data,
-                                               page_length);
+       ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
+                                       user_data, page_length);
        if (needs_clflush_after)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
@@ -813,13 +828,10 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 
                mutex_lock(&dev->struct_mutex);
 
-next_page:
-               set_page_dirty(page);
-               mark_page_accessed(page);
-
                if (ret)
                        goto out;
 
+next_page:
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
@@ -868,7 +880,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                       args->size))
                return -EFAULT;
 
-       if (likely(!i915_prefault_disable)) {
+       if (likely(!i915.prefault_disable)) {
                ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
                                                   args->size);
                if (ret)
@@ -1014,7 +1026,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        struct timespec *timeout,
                        struct drm_i915_file_private *file_priv)
 {
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
        const bool irq_test_in_progress =
                ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
        struct timespec before, now;
@@ -1029,7 +1042,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 
        timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
-       if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+       if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
                gen6_rps_boost(dev_priv);
                if (file_priv)
                        mod_delayed_work(dev_priv->wq,
@@ -1184,7 +1197,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
-                                           struct drm_file *file,
+                                           struct drm_i915_file_private *file_priv,
                                            bool readonly)
 {
        struct drm_device *dev = obj->base.dev;
@@ -1211,7 +1224,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
+       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
        mutex_lock(&dev->struct_mutex);
        if (ret)
                return ret;
@@ -1260,7 +1273,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
         * We will repeat the flush holding the lock in the normal manner
         * to catch cases where we are gazumped.
         */
-       ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
+       ret = i915_gem_object_wait_rendering__nonblocking(obj,
+                                                         file->driver_priv,
+                                                         !write_domain);
        if (ret)
                goto unref;
 
@@ -1392,6 +1407,15 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        trace_i915_gem_object_fault(obj, page_offset, true, write);
 
+       /* Try to flush the object off the GPU first without holding the lock.
+        * Upon reacquiring the lock, we will perform our sanity checks and then
+        * repeat the flush holding the lock in the normal manner to catch cases
+        * where we are gazumped.
+        */
+       ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
+       if (ret)
+               goto unlock;
+
        /* Access to snoopable pages through the GTT is incoherent. */
        if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
                ret = -EINVAL;
@@ -1399,7 +1423,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        /* Now bind it into the GTT if needed */
-       ret = i915_gem_obj_ggtt_pin(obj,  0, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
        if (ret)
                goto unlock;
 
@@ -1420,7 +1444,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
 out:
@@ -1453,6 +1477,7 @@ out:
                ret = VM_FAULT_OOM;
                break;
        case -ENOSPC:
+       case -EFAULT:
                ret = VM_FAULT_SIGBUS;
                break;
        default:
@@ -1617,8 +1642,8 @@ i915_gem_mmap_gtt(struct drm_file *file,
        }
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to mmap a purgeable buffer\n");
-               ret = -EINVAL;
+               DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
+               ret = -EFAULT;
                goto out;
        }
 
@@ -1971,8 +1996,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
                return 0;
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to obtain a purgeable object\n");
-               return -EINVAL;
+               DRM_DEBUG("Attempting to obtain a purgeable object\n");
+               return -EFAULT;
        }
 
        BUG_ON(obj->pages_pin_count);
@@ -2035,13 +2060,17 @@ static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
-       struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+       struct i915_address_space *vm;
+       struct i915_vma *vma;
 
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
-       list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               vma = i915_gem_obj_to_vma(obj, vm);
+               if (vma && !list_empty(&vma->mm_list))
+                       list_move_tail(&vma->mm_list, &vm->inactive_list);
+       }
 
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
@@ -2137,7 +2166,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
        u32 request_ring_position, request_start;
-       int was_empty;
        int ret;
 
        request_start = intel_ring_get_tail(ring);
@@ -2188,7 +2216,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
                i915_gem_context_reference(request->ctx);
 
        request->emitted_jiffies = jiffies;
-       was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
        request->file_priv = NULL;
 
@@ -2209,13 +2236,11 @@ int __i915_add_request(struct intel_ring_buffer *ring,
        if (!dev_priv->ums.mm_suspended) {
                i915_queue_hangcheck(ring->dev);
 
-               if (was_empty) {
-                       cancel_delayed_work_sync(&dev_priv->mm.idle_work);
-                       queue_delayed_work(dev_priv->wq,
-                                          &dev_priv->mm.retire_work,
-                                          round_jiffies_up_relative(HZ));
-                       intel_mark_busy(dev_priv->dev);
-               }
+               cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->mm.retire_work,
+                                  round_jiffies_up_relative(HZ));
+               intel_mark_busy(dev_priv->dev);
        }
 
        if (out_seqno)
@@ -2237,125 +2262,46 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
-                                   struct i915_address_space *vm)
+static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
+                                  const struct i915_hw_context *ctx)
 {
-       if (acthd >= i915_gem_obj_offset(obj, vm) &&
-           acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
-               return true;
+       unsigned long elapsed;
 
-       return false;
-}
+       elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
 
-static bool i915_head_inside_request(const u32 acthd_unmasked,
-                                    const u32 request_start,
-                                    const u32 request_end)
-{
-       const u32 acthd = acthd_unmasked & HEAD_ADDR;
+       if (ctx->hang_stats.banned)
+               return true;
 
-       if (request_start < request_end) {
-               if (acthd >= request_start && acthd < request_end)
+       if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+               if (!i915_gem_context_is_default(ctx)) {
+                       DRM_DEBUG("context hanging too fast, banning!\n");
                        return true;
-       } else if (request_start > request_end) {
-               if (acthd >= request_start || acthd < request_end)
-                       return true;
-       }
-
-       return false;
-}
-
-static struct i915_address_space *
-request_to_vm(struct drm_i915_gem_request *request)
-{
-       struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
-       struct i915_address_space *vm;
-
-       vm = &dev_priv->gtt.base;
-
-       return vm;
-}
-
-static bool i915_request_guilty(struct drm_i915_gem_request *request,
-                               const u32 acthd, bool *inside)
-{
-       /* There is a possibility that unmasked head address
-        * pointing inside the ring, matches the batch_obj address range.
-        * However this is extremely unlikely.
-        */
-       if (request->batch_obj) {
-               if (i915_head_inside_object(acthd, request->batch_obj,
-                                           request_to_vm(request))) {
-                       *inside = true;
+               } else if (dev_priv->gpu_error.stop_rings == 0) {
+                       DRM_ERROR("gpu hanging too fast, banning!\n");
                        return true;
                }
        }
 
-       if (i915_head_inside_request(acthd, request->head, request->tail)) {
-               *inside = false;
-               return true;
-       }
-
        return false;
 }
 
-static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+static void i915_set_reset_status(struct drm_i915_private *dev_priv,
+                                 struct i915_hw_context *ctx,
+                                 const bool guilty)
 {
-       const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+       struct i915_ctx_hang_stats *hs;
 
-       if (hs->banned)
-               return true;
-
-       if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
-               DRM_ERROR("context hanging too fast, declaring banned!\n");
-               return true;
-       }
-
-       return false;
-}
-
-static void i915_set_reset_status(struct intel_ring_buffer *ring,
-                                 struct drm_i915_gem_request *request,
-                                 u32 acthd)
-{
-       struct i915_ctx_hang_stats *hs = NULL;
-       bool inside, guilty;
-       unsigned long offset = 0;
-
-       /* Innocent until proven guilty */
-       guilty = false;
-
-       if (request->batch_obj)
-               offset = i915_gem_obj_offset(request->batch_obj,
-                                            request_to_vm(request));
-
-       if (ring->hangcheck.action != HANGCHECK_WAIT &&
-           i915_request_guilty(request, acthd, &inside)) {
-               DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
-                         ring->name,
-                         inside ? "inside" : "flushing",
-                         offset,
-                         request->ctx ? request->ctx->id : 0,
-                         acthd);
+       if (WARN_ON(!ctx))
+               return;
 
-               guilty = true;
-       }
+       hs = &ctx->hang_stats;
 
-       /* If contexts are disabled or this is the default context, use
-        * file_priv->reset_state
-        */
-       if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
-               hs = &request->ctx->hang_stats;
-       else if (request->file_priv)
-               hs = &request->file_priv->hang_stats;
-
-       if (hs) {
-               if (guilty) {
-                       hs->banned = i915_context_is_banned(hs);
-                       hs->batch_active++;
-                       hs->guilty_ts = get_seconds();
-               } else {
-                       hs->batch_pending++;
-               }
+       if (guilty) {
+               hs->banned = i915_context_is_banned(dev_priv, ctx);
+               hs->batch_active++;
+               hs->guilty_ts = get_seconds();
+       } else {
+               hs->batch_pending++;
        }
 }
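The ban decision above reduces to: an already-banned context stays banned, and a context that hangs again within DRM_I915_CTX_BAN_PERIOD seconds of its last guilty hang gets banned, except that the default context is spared when the hang was injected through gpu_error.stop_rings. A hedged restatement of the predicate:

	/* illustrative restatement of i915_context_is_banned() */
	static bool ban_example(unsigned long elapsed, bool already_banned,
				bool is_default, int stop_rings)
	{
		if (already_banned)
			return true;
		if (elapsed > DRM_I915_CTX_BAN_PERIOD)
			return false;	/* last guilty hang was long ago */
		/* repeat offender: ban, unless it is the default context
		 * and the hang was injected by the test infrastructure */
		return !is_default || stop_rings == 0;
	}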
 
@@ -2370,19 +2316,41 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
        kfree(request);
 }
 
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-                                      struct intel_ring_buffer *ring)
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_ring_buffer *ring)
 {
-       u32 completed_seqno = ring->get_seqno(ring, false);
-       u32 acthd = intel_ring_get_active_head(ring);
        struct drm_i915_gem_request *request;
+       u32 completed_seqno;
+
+       completed_seqno = ring->get_seqno(ring, false);
 
        list_for_each_entry(request, &ring->request_list, list) {
                if (i915_seqno_passed(completed_seqno, request->seqno))
                        continue;
 
-               i915_set_reset_status(ring, request, acthd);
+               return request;
        }
+
+       return NULL;
+}
+
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+                                      struct intel_ring_buffer *ring)
+{
+       struct drm_i915_gem_request *request;
+       bool ring_hung;
+
+       request = i915_gem_find_active_request(ring);
+
+       if (request == NULL)
+               return;
+
+       ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+
+       i915_set_reset_status(dev_priv, request->ctx, ring_hung);
+
+       list_for_each_entry_continue(request, &ring->request_list, list)
+               i915_set_reset_status(dev_priv, request->ctx, false);
 }
 
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
@@ -2456,13 +2424,15 @@ void i915_gem_reset(struct drm_device *dev)
 
        i915_gem_cleanup_ringbuffer(dev);
 
+       i915_gem_context_reset(dev);
+
        i915_gem_restore_fences(dev);
 }
 
 /**
  * This function clears the request list as sequence numbers are passed.
  */
-void
+static void
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
        uint32_t seqno;
@@ -2474,6 +2444,24 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
        seqno = ring->get_seqno(ring, true);
 
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate,
+        * before we free the context associated with the requests.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj;
+
+               obj = list_first_entry(&ring->active_list,
+                                     struct drm_i915_gem_object,
+                                     ring_list);
+
+               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+                       break;
+
+               i915_gem_object_move_to_inactive(obj);
+       }
+
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -2495,22 +2483,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
                i915_gem_free_request(request);
        }
 
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate.
-        */
-       while (!list_empty(&ring->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&ring->active_list,
-                                     struct drm_i915_gem_object,
-                                     ring_list);
-
-               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
-                       break;
-
-               i915_gem_object_move_to_inactive(obj);
-       }
-
        if (unlikely(ring->trace_irq_seqno &&
                     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
                ring->irq_put(ring);
@@ -2753,19 +2725,15 @@ int i915_vma_unbind(struct i915_vma *vma)
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
        int ret;
 
-       /* For now we only ever use 1 vma per object */
-       WARN_ON(!list_is_singular(&obj->vma_list));
-
        if (list_empty(&vma->vma_link))
                return 0;
 
        if (!drm_mm_node_allocated(&vma->node)) {
                i915_gem_vma_destroy(vma);
-
                return 0;
        }
 
-       if (obj->pin_count)
+       if (vma->pin_count)
                return -EBUSY;
 
        BUG_ON(obj->pages == NULL);
@@ -2787,15 +2755,11 @@ int i915_vma_unbind(struct i915_vma *vma)
 
        trace_i915_vma_unbind(vma);
 
-       if (obj->has_global_gtt_mapping)
-               i915_gem_gtt_unbind_object(obj);
-       if (obj->has_aliasing_ppgtt_mapping) {
-               i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
-               obj->has_aliasing_ppgtt_mapping = 0;
-       }
+       vma->unbind_vma(vma);
+
        i915_gem_gtt_finish_object(obj);
 
-       list_del(&vma->mm_list);
+       list_del_init(&vma->mm_list);
        /* Avoid an unnecessary call to unbind on rebind. */
        if (i915_is_ggtt(vma->vm))
                obj->map_and_fenceable = true;
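Unbinding (and, in later hunks, binding) is now dispatched through per-VMA function pointers instead of object-level global-GTT/aliasing-PPGTT special cases; the vma-creation helpers removed from i915_gem.c further down move to i915_gem_gtt.c, where the pointers get wired up per address-space type. A hedged sketch of that wiring, assuming ggtt_*/ppgtt_* helpers on the GTT side:

	/* assumed setup at vma creation, per address-space type */
	if (i915_is_ggtt(vm)) {
		vma->bind_vma = ggtt_bind_vma;
		vma->unbind_vma = ggtt_unbind_vma;
	} else {
		vma->bind_vma = ppgtt_bind_vma;
		vma->unbind_vma = ppgtt_unbind_vma;
	}

	/* callers no longer care which VM the vma lives in */
	vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);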
@@ -2817,26 +2781,6 @@ int i915_vma_unbind(struct i915_vma *vma)
        return 0;
 }
 
-/**
- * Unbinds an object from the global GTT aperture.
- */
-int
-i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       struct i915_address_space *ggtt = &dev_priv->gtt.base;
-
-       if (!i915_gem_obj_ggtt_bound(obj))
-               return 0;
-
-       if (obj->pin_count)
-               return -EBUSY;
-
-       BUG_ON(obj->pages == NULL);
-
-       return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
-}
-
 int i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2845,7 +2789,7 @@ int i915_gpu_idle(struct drm_device *dev)
 
        /* Flush everything onto the inactive list. */
        for_each_ring(ring, dev_priv, i) {
-               ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+               ret = i915_switch_context(ring, NULL, ring->default_context);
                if (ret)
                        return ret;
 
@@ -3259,18 +3203,17 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
-static int
+static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           struct i915_address_space *vm,
                           unsigned alignment,
-                          bool map_and_fenceable,
-                          bool nonblocking)
+                          unsigned flags)
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        size_t gtt_max =
-               map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+               flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
        struct i915_vma *vma;
        int ret;
 
@@ -3282,46 +3225,39 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                                                     obj->tiling_mode, true);
        unfenced_alignment =
                i915_gem_get_gtt_alignment(dev,
-                                                   obj->base.size,
-                                                   obj->tiling_mode, false);
+                                          obj->base.size,
+                                          obj->tiling_mode, false);
 
        if (alignment == 0)
-               alignment = map_and_fenceable ? fence_alignment :
+               alignment = flags & PIN_MAPPABLE ? fence_alignment :
                                                unfenced_alignment;
-       if (map_and_fenceable && alignment & (fence_alignment - 1)) {
-               DRM_ERROR("Invalid object alignment requested %u\n", alignment);
-               return -EINVAL;
+       if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
+               DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
+               return ERR_PTR(-EINVAL);
        }
 
-       size = map_and_fenceable ? fence_size : obj->base.size;
+       size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
 
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
        if (obj->base.size > gtt_max) {
-               DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+               DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
                          obj->base.size,
-                         map_and_fenceable ? "mappable" : "total",
+                         flags & PIN_MAPPABLE ? "mappable" : "total",
                          gtt_max);
-               return -E2BIG;
+               return ERR_PTR(-E2BIG);
        }
 
        ret = i915_gem_object_get_pages(obj);
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
 
        i915_gem_object_pin_pages(obj);
 
-       BUG_ON(!i915_is_ggtt(vm));
-
        vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
-       if (IS_ERR(vma)) {
-               ret = PTR_ERR(vma);
+       if (IS_ERR(vma))
                goto err_unpin;
-       }
-
-       /* For now we only ever use 1 vma per object */
-       WARN_ON(!list_is_singular(&obj->vma_list));
 
 search_free:
        ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
@@ -3330,9 +3266,7 @@ search_free:
                                                  DRM_MM_SEARCH_DEFAULT);
        if (ret) {
                ret = i915_gem_evict_something(dev, vm, size, alignment,
-                                              obj->cache_level,
-                                              map_and_fenceable,
-                                              nonblocking);
+                                              obj->cache_level, flags);
                if (ret == 0)
                        goto search_free;
 
@@ -3363,19 +3297,23 @@ search_free:
                obj->map_and_fenceable = mappable && fenceable;
        }
 
-       WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
+       WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+
+       trace_i915_vma_bind(vma, flags);
+       vma->bind_vma(vma, obj->cache_level,
+                     flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
 
-       trace_i915_vma_bind(vma, map_and_fenceable);
        i915_gem_verify_gtt(dev);
-       return 0;
+       return vma;
 
 err_remove_node:
        drm_mm_remove_node(&vma->node);
 err_free_vma:
        i915_gem_vma_destroy(vma);
+       vma = ERR_PTR(ret);
 err_unpin:
        i915_gem_object_unpin_pages(obj);
-       return ret;
+       return vma;
 }
 
 bool
@@ -3528,14 +3466,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
 {
        struct drm_device *dev = obj->base.dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_vma *vma;
        int ret;
 
        if (obj->cache_level == cache_level)
                return 0;
 
-       if (obj->pin_count) {
+       if (i915_gem_obj_is_pinned(obj)) {
                DRM_DEBUG("can not change the cache level of pinned objects\n");
                return -EBUSY;
        }
@@ -3567,11 +3504,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                return ret;
                }
 
-               if (obj->has_global_gtt_mapping)
-                       i915_gem_gtt_bind_object(obj, cache_level);
-               if (obj->has_aliasing_ppgtt_mapping)
-                       i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-                                              obj, cache_level);
+               list_for_each_entry(vma, &obj->vma_list, vma_link)
+                       if (drm_mm_node_allocated(&vma->node))
+                               vma->bind_vma(vma, cache_level,
+                                             obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
        }
 
        list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3695,7 +3631,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
         * subtracting the potential reference by the user, any pin_count
         * remains, it must be due to another use by the display engine.
         */
-       return obj->pin_count - !!obj->user_pin_count;
+       return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
 }
 
 /*
@@ -3740,7 +3676,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
         * (e.g. libkms for the bootup splash), we have to ensure that we
         * always use map_and_fenceable for all scanout buffers.
         */
-       ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
        if (ret)
                goto err_unpin_display;
 
@@ -3769,7 +3705,7 @@ err_unpin_display:
 void
 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
 {
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
        obj->pin_display = is_pin_display(obj);
 }
 
@@ -3896,65 +3832,63 @@ int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
                    struct i915_address_space *vm,
                    uint32_t alignment,
-                   bool map_and_fenceable,
-                   bool nonblocking)
+                   unsigned flags)
 {
        struct i915_vma *vma;
        int ret;
 
-       if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
-               return -EBUSY;
-
-       WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+       if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
+               return -EINVAL;
 
        vma = i915_gem_obj_to_vma(obj, vm);
-
        if (vma) {
+               if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+                       return -EBUSY;
+
                if ((alignment &&
                     vma->node.start & (alignment - 1)) ||
-                   (map_and_fenceable && !obj->map_and_fenceable)) {
-                       WARN(obj->pin_count,
+                   (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+                       WARN(vma->pin_count,
                             "bo is already pinned with incorrect alignment:"
                             " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
                             i915_gem_obj_offset(obj, vm), alignment,
-                            map_and_fenceable,
+                            flags & PIN_MAPPABLE,
                             obj->map_and_fenceable);
                        ret = i915_vma_unbind(vma);
                        if (ret)
                                return ret;
+
+                       vma = NULL;
                }
        }
 
-       if (!i915_gem_obj_bound(obj, vm)) {
-               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
-               ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
-                                                map_and_fenceable,
-                                                nonblocking);
-               if (ret)
-                       return ret;
-
-               if (!dev_priv->mm.aliasing_ppgtt)
-                       i915_gem_gtt_bind_object(obj, obj->cache_level);
+       if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
+               vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
+               if (IS_ERR(vma))
+                       return PTR_ERR(vma);
        }
 
-       if (!obj->has_global_gtt_mapping && map_and_fenceable)
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+       if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+               vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
-       obj->pin_count++;
-       obj->pin_mappable |= map_and_fenceable;
+       vma->pin_count++;
+       if (flags & PIN_MAPPABLE)
+               obj->pin_mappable |= true;
 
        return 0;
 }
 
 void
-i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
 {
-       BUG_ON(obj->pin_count == 0);
-       BUG_ON(!i915_gem_obj_bound_any(obj));
+       struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+
+       BUG_ON(!vma);
+       BUG_ON(vma->pin_count == 0);
+       BUG_ON(!i915_gem_obj_ggtt_bound(obj));
 
-       if (--obj->pin_count == 0)
+       if (--vma->pin_count == 0)
                obj->pin_mappable = false;
 }
 
@@ -3966,6 +3900,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj;
        int ret;
 
+       if (INTEL_INFO(dev)->gen >= 6)
+               return -ENODEV;
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
@@ -3977,13 +3914,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        }
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to pin a purgeable buffer\n");
-               ret = -EINVAL;
+               DRM_DEBUG("Attempting to pin a purgeable buffer\n");
+               ret = -EFAULT;
                goto out;
        }
 
        if (obj->pin_filp != NULL && obj->pin_filp != file) {
-               DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+               DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
@@ -3995,7 +3932,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        }
 
        if (obj->user_pin_count == 0) {
-               ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
+               ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
                if (ret)
                        goto out;
        }
@@ -4030,7 +3967,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        }
 
        if (obj->pin_filp != file) {
-               DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+               DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
@@ -4038,7 +3975,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        obj->user_pin_count--;
        if (obj->user_pin_count == 0) {
                obj->pin_filp = NULL;
-               i915_gem_object_unpin(obj);
+               i915_gem_object_ggtt_unpin(obj);
        }
 
 out:
@@ -4118,7 +4055,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                goto unlock;
        }
 
-       if (obj->pin_count) {
+       if (i915_gem_obj_is_pinned(obj)) {
                ret = -EINVAL;
                goto out;
        }
@@ -4229,12 +4166,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        if (obj->phys_obj)
                i915_gem_detach_phys_object(dev, obj);
 
-       obj->pin_count = 0;
-       /* NB: 0 or 1 elements */
-       WARN_ON(!list_empty(&obj->vma_list) &&
-               !list_is_singular(&obj->vma_list));
        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-               int ret = i915_vma_unbind(vma);
+               int ret;
+
+               vma->pin_count = 0;
+               ret = i915_vma_unbind(vma);
                if (WARN_ON(ret == -ERESTARTSYS)) {
                        bool was_interruptible;
 
@@ -4283,41 +4219,6 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
        return NULL;
 }
 
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
-                                             struct i915_address_space *vm)
-{
-       struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-       if (vma == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       INIT_LIST_HEAD(&vma->vma_link);
-       INIT_LIST_HEAD(&vma->mm_list);
-       INIT_LIST_HEAD(&vma->exec_list);
-       vma->vm = vm;
-       vma->obj = obj;
-
-       /* Keep GGTT vmas first to make debug easier */
-       if (i915_is_ggtt(vm))
-               list_add(&vma->vma_link, &obj->vma_list);
-       else
-               list_add_tail(&vma->vma_link, &obj->vma_list);
-
-       return vma;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                                 struct i915_address_space *vm)
-{
-       struct i915_vma *vma;
-
-       vma = i915_gem_obj_to_vma(obj, vm);
-       if (!vma)
-               vma = __i915_gem_vma_create(obj, vm);
-
-       return vma;
-}
-
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
        WARN_ON(vma->node.allocated);
@@ -4508,9 +4409,15 @@ i915_gem_init_hw(struct drm_device *dev)
                           LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
        if (HAS_PCH_NOP(dev)) {
-               u32 temp = I915_READ(GEN7_MSG_CTL);
-               temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
-               I915_WRITE(GEN7_MSG_CTL, temp);
+               if (IS_IVYBRIDGE(dev)) {
+                       u32 temp = I915_READ(GEN7_MSG_CTL);
+                       temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+                       I915_WRITE(GEN7_MSG_CTL, temp);
+               } else if (INTEL_INFO(dev)->gen >= 7) {
+                       u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
+                       temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
+                       I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
+               }
        }
 
        i915_gem_init_swizzling(dev);
@@ -4523,25 +4430,23 @@ i915_gem_init_hw(struct drm_device *dev)
                i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
        /*
-        * XXX: There was some w/a described somewhere suggesting loading
-        * contexts before PPGTT.
+        * XXX: Contexts should only be initialized once. A switch to the
+        * default context, however, is something we'd like to do after a
+        * reset or thaw (the latter may not actually be necessary for HW,
+        * but it fits our code better). Context switching requires rings
+        * (for do_switch), but must happen before PPGTT is enabled, so
+        * don't move this.
         */
-       ret = i915_gem_context_init(dev);
+       ret = i915_gem_context_enable(dev_priv);
        if (ret) {
-               i915_gem_cleanup_ringbuffer(dev);
-               DRM_ERROR("Context initialization failed %d\n", ret);
-               return ret;
-       }
-
-       if (dev_priv->mm.aliasing_ppgtt) {
-               ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
-               if (ret) {
-                       i915_gem_cleanup_aliasing_ppgtt(dev);
-                       DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
-               }
+               DRM_ERROR("Context enable failed %d\n", ret);
+               goto err_out;
        }
 
        return 0;
+
+err_out:
+       i915_gem_cleanup_ringbuffer(dev);
+       return ret;
 }
 
 int i915_gem_init(struct drm_device *dev)
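Net effect on bring-up ordering: context allocation moves into i915_gem_init() (next hunk) and happens exactly once, while the switch to the default context via i915_gem_context_enable() stays in i915_gem_init_hw() and therefore reruns on GPU reset and on thaw. A hedged call-order summary using the names from this diff:

	/*
	 * i915_gem_init():                  once, at driver load
	 *     i915_gem_init_global_gtt(dev);
	 *     i915_gem_context_init(dev);        - allocate contexts
	 *     i915_gem_init_hw(dev);
	 *
	 * i915_gem_init_hw():               load, GPU reset, resume/thaw
	 *     ...
	 *     i915_gem_context_enable(dev_priv); - switch to default context
	 */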
@@ -4560,10 +4465,18 @@ int i915_gem_init(struct drm_device *dev)
 
        i915_gem_init_global_gtt(dev);
 
+       ret = i915_gem_context_init(dev);
+       if (ret) {
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
        ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
-               i915_gem_cleanup_aliasing_ppgtt(dev);
+               WARN_ON(dev_priv->mm.aliasing_ppgtt);
+               i915_gem_context_fini(dev);
+               drm_mm_takedown(&dev_priv->gtt.base.mm);
                return ret;
        }
 
@@ -4658,14 +4571,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
        INIT_LIST_HEAD(&ring->request_list);
 }
 
-static void i915_init_vm(struct drm_i915_private *dev_priv,
-                        struct i915_address_space *vm)
+void i915_init_vm(struct drm_i915_private *dev_priv,
+                 struct i915_address_space *vm)
 {
+       if (!i915_is_ggtt(vm))
+               drm_mm_init(&vm->mm, vm->start, vm->total);
        vm->dev = dev_priv->dev;
        INIT_LIST_HEAD(&vm->active_list);
        INIT_LIST_HEAD(&vm->inactive_list);
        INIT_LIST_HEAD(&vm->global_link);
-       list_add(&vm->global_link, &dev_priv->vm_list);
+       list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
 
 void
@@ -4950,6 +4865,7 @@ i915_gem_file_idle_work_handler(struct work_struct *work)
 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv;
+       int ret;
 
        DRM_DEBUG_DRIVER("\n");
 
@@ -4959,15 +4875,18 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
 
        file->driver_priv = file_priv;
        file_priv->dev_priv = dev->dev_private;
+       file_priv->file = file;
 
        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);
        INIT_DELAYED_WORK(&file_priv->mm.idle_work,
                          i915_gem_file_idle_work_handler);
 
-       idr_init(&file_priv->context_idr);
+       ret = i915_gem_context_open(dev, file);
+       if (ret)
+               kfree(file_priv);
 
-       return 0;
+       return ret;
 }
 
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
@@ -5014,7 +4933,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
                if (obj->active)
                        continue;
 
-               if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+               if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
                        count += obj->base.size >> PAGE_SHIFT;
        }
 
@@ -5031,7 +4950,8 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+       if (!dev_priv->mm.aliasing_ppgtt ||
+           vm == &dev_priv->mm.aliasing_ppgtt->base)
                vm = &dev_priv->gtt.base;
 
        BUG_ON(list_empty(&o->vma_list));
@@ -5072,7 +4992,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+       if (!dev_priv->mm.aliasing_ppgtt ||
+           vm == &dev_priv->mm.aliasing_ppgtt->base)
                vm = &dev_priv->gtt.base;
 
        BUG_ON(list_empty(&o->vma_list));
@@ -5127,7 +5048,7 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
                return NULL;
 
        vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-       if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+       if (vma->vm != obj_to_ggtt(obj))
                return NULL;
 
        return vma;
index e08acaba540269736777a7f7758a88d97f2aa4b6..ce41cff843466291582aeaed583ded996fba213c 100644 (file)
  * I've seen in a spec to date, and that was a workaround for a non-shipping
  * part. It should be safe to decrease this, but it's more future proof as is.
  */
-#define CONTEXT_ALIGN (64<<10)
+#define GEN6_CONTEXT_ALIGN (64<<10)
+#define GEN7_CONTEXT_ALIGN 4096
 
-static struct i915_hw_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct i915_hw_context *to);
+static int do_switch(struct intel_ring_buffer *ring,
+                    struct i915_hw_context *to);
+
+static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &ppgtt->base;
+
+       if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
+           (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
+               ppgtt->base.cleanup(&ppgtt->base);
+               return;
+       }
+
+       /*
+        * Make sure vmas are unbound before we take down the drm_mm
+        *
+        * FIXME: Proper refcounting should take care of this, this shouldn't be
+        * needed at all.
+        */
+       if (!list_empty(&vm->active_list)) {
+               struct i915_vma *vma;
+
+               list_for_each_entry(vma, &vm->active_list, mm_list)
+                       if (WARN_ON(list_empty(&vma->vma_link) ||
+                                   list_is_singular(&vma->vma_link)))
+                               break;
+
+               i915_gem_evict_vm(&ppgtt->base, true);
+       } else {
+               i915_gem_retire_requests(dev);
+               i915_gem_evict_vm(&ppgtt->base, false);
+       }
+
+       ppgtt->base.cleanup(&ppgtt->base);
+}
+
+static void ppgtt_release(struct kref *kref)
+{
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(kref, struct i915_hw_ppgtt, ref);
+
+       do_ppgtt_cleanup(ppgtt);
+       kfree(ppgtt);
+}
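
The PPGTT lifetime is now managed with a standard kref, with ppgtt_release() above as the release callback. The pattern, sketched with this patch's names (the kref_init() on the create path is assumed here, not shown in these hunks):

	kref_init(&ppgtt->ref);                 /* when the ppgtt is created */
	kref_get(&ppgtt->ref);                  /* each extra context user */
	kref_put(&ppgtt->ref, ppgtt_release);   /* on context free */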
+
+static size_t get_context_alignment(struct drm_device *dev)
+{
+       if (IS_GEN6(dev))
+               return GEN6_CONTEXT_ALIGN;
+
+       return GEN7_CONTEXT_ALIGN;
+}
 
 static int get_context_size(struct drm_device *dev)
 {
@@ -131,14 +183,43 @@ void i915_gem_context_free(struct kref *ctx_ref)
 {
        struct i915_hw_context *ctx = container_of(ctx_ref,
                                                   typeof(*ctx), ref);
+       struct i915_hw_ppgtt *ppgtt = NULL;
 
-       list_del(&ctx->link);
+       /* We refcount even the aliasing PPGTT to keep the code symmetric */
+       if (USES_PPGTT(ctx->obj->base.dev))
+               ppgtt = ctx_to_ppgtt(ctx);
+
+       /* XXX: Free up the object before tearing down the address space, in
+        * case we're bound in the PPGTT */
        drm_gem_object_unreference(&ctx->obj->base);
+
+       if (ppgtt)
+               kref_put(&ppgtt->ref, ppgtt_release);
+       list_del(&ctx->link);
        kfree(ctx);
 }
 
+static struct i915_hw_ppgtt *
+create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
+{
+       struct i915_hw_ppgtt *ppgtt;
+       int ret;
+
+       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+       if (!ppgtt)
+               return ERR_PTR(-ENOMEM);
+
+       ret = i915_gem_init_ppgtt(dev, ppgtt);
+       if (ret) {
+               kfree(ppgtt);
+               return ERR_PTR(ret);
+       }
+
+       return ppgtt;
+}
+
 static struct i915_hw_context *
-create_hw_context(struct drm_device *dev,
+__create_hw_context(struct drm_device *dev,
                  struct drm_i915_file_private *file_priv)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -166,18 +247,13 @@ create_hw_context(struct drm_device *dev,
                        goto err_out;
        }
 
-       /* The ring associated with the context object is handled by the normal
-        * object tracking code. We give an initial ring value simple to pass an
-        * assertion in the context switch code.
-        */
-       ctx->ring = &dev_priv->ring[RCS];
        list_add_tail(&ctx->link, &dev_priv->context_list);
 
        /* Default context will never have a file_priv */
        if (file_priv == NULL)
                return ctx;
 
-       ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
+       ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
                        GFP_KERNEL);
        if (ret < 0)
                goto err_out;
@@ -196,67 +272,136 @@ err_out:
        return ERR_PTR(ret);
 }
 
-static inline bool is_default_context(struct i915_hw_context *ctx)
-{
-       return (ctx == ctx->ring->default_context);
-}
-
 /**
  * The default context needs to exist per ring that uses contexts. It stores the
  * context state of the GPU for applications that don't utilize HW contexts, as
  * well as an idle case.
  */
-static int create_default_context(struct drm_i915_private *dev_priv)
+static struct i915_hw_context *
+i915_gem_create_context(struct drm_device *dev,
+                       struct drm_i915_file_private *file_priv,
+                       bool create_vm)
 {
+       const bool is_global_default_ctx = file_priv == NULL;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_context *ctx;
-       int ret;
+       int ret = 0;
 
-       BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       ctx = create_hw_context(dev_priv->dev, NULL);
+       ctx = __create_hw_context(dev, file_priv);
        if (IS_ERR(ctx))
-               return PTR_ERR(ctx);
-
-       /* We may need to do things with the shrinker which require us to
-        * immediately switch back to the default context. This can cause a
-        * problem as pinning the default context also requires GTT space which
-        * may not be available. To avoid this we always pin the
-        * default context.
-        */
-       ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
-               goto err_destroy;
-       }
+               return ctx;
 
-       ret = do_switch(ctx);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
-               goto err_unpin;
+       if (is_global_default_ctx) {
+               /* We may need to do things with the shrinker which
+                * require us to immediately switch back to the default
+                * context. This can cause a problem as pinning the
+                * default context also requires GTT space which may not
+                * be available. To avoid this we always pin the default
+                * context.
+                */
+               ret = i915_gem_obj_ggtt_pin(ctx->obj,
+                                           get_context_alignment(dev), 0);
+               if (ret) {
+                       DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
+                       goto err_destroy;
+               }
        }
 
-       dev_priv->ring[RCS].default_context = ctx;
+       if (create_vm) {
+               struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+
+               if (IS_ERR_OR_NULL(ppgtt)) {
+                       DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
+                                        PTR_ERR(ppgtt));
+                       ret = PTR_ERR(ppgtt);
+                       goto err_unpin;
+               } else
+                       ctx->vm = &ppgtt->base;
+
+               /* This case is reserved for the global default context and
+                * should only happen once. */
+               if (is_global_default_ctx) {
+                       if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
+                               ret = -EEXIST;
+                               goto err_unpin;
+                       }
+
+                       dev_priv->mm.aliasing_ppgtt = ppgtt;
+               }
+       } else if (USES_PPGTT(dev)) {
+               /* For platforms which only have aliasing PPGTT, we fake the
+                * address space and refcounting. */
+               ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
+               kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
+       } else
+               ctx->vm = &dev_priv->gtt.base;
 
-       DRM_DEBUG_DRIVER("Default HW context loaded\n");
-       return 0;
+       return ctx;
 
 err_unpin:
-       i915_gem_object_unpin(ctx->obj);
+       if (is_global_default_ctx)
+               i915_gem_object_ggtt_unpin(ctx->obj);
 err_destroy:
        i915_gem_context_unreference(ctx);
-       return ret;
+       return ERR_PTR(ret);
+}
+
+void i915_gem_context_reset(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int i;
+
+       if (!HAS_HW_CONTEXTS(dev))
+               return;
+
+       /* Prevent the hardware from restoring the last context (which hung) on
+        * the next switch */
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               struct i915_hw_context *dctx;
+               if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+                       continue;
+
+               /* Do a fake switch to the default context */
+               ring = &dev_priv->ring[i];
+               dctx = ring->default_context;
+               if (WARN_ON(!dctx))
+                       continue;
+
+               if (!ring->last_context)
+                       continue;
+
+               if (ring->last_context == dctx)
+                       continue;
+
+               if (i == RCS) {
+                       WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+                                                     get_context_alignment(dev), 0));
+                       /* Fake a finish/inactive */
+                       dctx->obj->base.write_domain = 0;
+                       dctx->obj->active = 0;
+               }
+
+               i915_gem_context_unreference(ring->last_context);
+               i915_gem_context_reference(dctx);
+               ring->last_context = dctx;
+       }
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
+       struct intel_ring_buffer *ring;
+       int i;
 
        if (!HAS_HW_CONTEXTS(dev))
                return 0;
 
-       /* If called from reset, or thaw... we've been here already */
-       if (dev_priv->ring[RCS].default_context)
+       /* Init should only be called once per module load. Eventually the
+        * restriction on the context_disabled check can be loosened. */
+       if (WARN_ON(dev_priv->ring[RCS].default_context))
                return 0;
 
        dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
@@ -266,11 +411,23 @@ int i915_gem_context_init(struct drm_device *dev)
                return -E2BIG;
        }
 
-       ret = create_default_context(dev_priv);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
-                                ret);
-               return ret;
+       dev_priv->ring[RCS].default_context =
+               i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+
+       if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
+               DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
+                                PTR_ERR(dev_priv->ring[RCS].default_context));
+               return PTR_ERR(dev_priv->ring[RCS].default_context);
+       }
+
+       for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
+               if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+                       continue;
+
+               ring = &dev_priv->ring[i];
+
+               /* NB: RCS will hold a ref for all rings */
+               ring->default_context = dev_priv->ring[RCS].default_context;
        }
 
        DRM_DEBUG_DRIVER("HW context support initialized\n");
@@ -281,6 +438,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
+       int i;
 
        if (!HAS_HW_CONTEXTS(dev))
                return;
@@ -300,59 +458,129 @@ void i915_gem_context_fini(struct drm_device *dev)
        if (dev_priv->ring[RCS].last_context == dctx) {
                /* Fake switch to NULL context */
                WARN_ON(dctx->obj->active);
-               i915_gem_object_unpin(dctx->obj);
+               i915_gem_object_ggtt_unpin(dctx->obj);
                i915_gem_context_unreference(dctx);
+               dev_priv->ring[RCS].last_context = NULL;
+       }
+
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               struct intel_ring_buffer *ring = &dev_priv->ring[i];
+               if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+                       continue;
+
+               if (ring->last_context)
+                       i915_gem_context_unreference(ring->last_context);
+
+               ring->default_context = NULL;
+               ring->last_context = NULL;
        }
 
-       i915_gem_object_unpin(dctx->obj);
+       i915_gem_object_ggtt_unpin(dctx->obj);
        i915_gem_context_unreference(dctx);
-       dev_priv->ring[RCS].default_context = NULL;
-       dev_priv->ring[RCS].last_context = NULL;
+       dev_priv->mm.aliasing_ppgtt = NULL;
+}
+
+int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+{
+       struct intel_ring_buffer *ring;
+       int ret, i;
+
+       if (!HAS_HW_CONTEXTS(dev_priv->dev))
+               return 0;
+
+       /* This is the only place the aliasing PPGTT gets enabled, which means
+        * it has to happen before we bail on reset */
+       if (dev_priv->mm.aliasing_ppgtt) {
+               struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+               ppgtt->enable(ppgtt);
+       }
+
+       /* FIXME: We should make this work, even in reset */
+       if (i915_reset_in_progress(&dev_priv->gpu_error))
+               return 0;
+
+       BUG_ON(!dev_priv->ring[RCS].default_context);
+
+       for_each_ring(ring, dev_priv, i) {
+               ret = do_switch(ring, ring->default_context);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
 {
        struct i915_hw_context *ctx = p;
 
-       BUG_ON(id == DEFAULT_CONTEXT_ID);
+       /* Ignore the default context because close will handle it */
+       if (i915_gem_context_is_default(ctx))
+               return 0;
 
        i915_gem_context_unreference(ctx);
        return 0;
 }
 
-struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct drm_device *dev,
-                               struct drm_file *file,
-                               u32 id)
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct i915_hw_context *ctx;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (id == DEFAULT_CONTEXT_ID)
-               return &file_priv->hang_stats;
+       if (!HAS_HW_CONTEXTS(dev)) {
+               /* Cheat for hang stats */
+               file_priv->private_default_ctx =
+                       kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
 
-       if (!HAS_HW_CONTEXTS(dev))
-               return ERR_PTR(-ENOENT);
+               if (file_priv->private_default_ctx == NULL)
+                       return -ENOMEM;
 
-       ctx = i915_gem_context_get(file->driver_priv, id);
-       if (ctx == NULL)
-               return ERR_PTR(-ENOENT);
+               file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
+               return 0;
+       }
+
+       idr_init(&file_priv->context_idr);
+
+       mutex_lock(&dev->struct_mutex);
+       file_priv->private_default_ctx =
+               i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+       mutex_unlock(&dev->struct_mutex);
+
+       if (IS_ERR(file_priv->private_default_ctx)) {
+               idr_destroy(&file_priv->context_idr);
+               return PTR_ERR(file_priv->private_default_ctx);
+       }
 
-       return &ctx->hang_stats;
+       return 0;
 }
 
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
+       if (!HAS_HW_CONTEXTS(dev)) {
+               kfree(file_priv->private_default_ctx);
+               return;
+       }
+
        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+       i915_gem_context_unreference(file_priv->private_default_ctx);
        idr_destroy(&file_priv->context_idr);
 }
 
-static struct i915_hw_context *
+struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 {
-       return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+       struct i915_hw_context *ctx;
+
+       if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
+               return file_priv->private_default_ctx;
+
+       ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+       if (!ctx)
+               return ERR_PTR(-ENOENT);
+
+       return ctx;
 }
 
 static inline int
@@ -390,7 +618,10 @@ mi_set_context(struct intel_ring_buffer *ring,
                        MI_SAVE_EXT_STATE_EN |
                        MI_RESTORE_EXT_STATE_EN |
                        hw_flags);
-       /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
+       /*
+        * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+        * WaMiSetContext_Hang:snb,ivb,vlv
+        */
        intel_ring_emit(ring, MI_NOOP);
 
        if (IS_GEN7(ring->dev))
@@ -403,21 +634,30 @@ mi_set_context(struct intel_ring_buffer *ring,
        return ret;
 }
 
-static int do_switch(struct i915_hw_context *to)
+static int do_switch(struct intel_ring_buffer *ring,
+                    struct i915_hw_context *to)
 {
-       struct intel_ring_buffer *ring = to->ring;
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct i915_hw_context *from = ring->last_context;
+       struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
        u32 hw_flags = 0;
        int ret, i;
 
-       BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
+       if (from != NULL && ring == &dev_priv->ring[RCS]) {
+               BUG_ON(from->obj == NULL);
+               BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+       }
 
-       if (from == to && !to->remap_slice)
+       if (from == to && from->last_ring == ring && !to->remap_slice)
                return 0;
 
-       ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
-       if (ret)
-               return ret;
+       /* Trying to pin first makes error handling easier. */
+       if (ring == &dev_priv->ring[RCS]) {
+               ret = i915_gem_obj_ggtt_pin(to->obj,
+                                           get_context_alignment(ring->dev), 0);
+               if (ret)
+                       return ret;
+       }
 
        /*
         * Pin can switch back to the default context if we end up calling into
@@ -426,6 +666,18 @@ static int do_switch(struct i915_hw_context *to)
         */
        from = ring->last_context;
 
+       if (USES_FULL_PPGTT(ring->dev)) {
+               ret = ppgtt->switch_mm(ppgtt, ring, false);
+               if (ret)
+                       goto unpin_out;
+       }
+
+       if (ring != &dev_priv->ring[RCS]) {
+               if (from)
+                       i915_gem_context_unreference(from);
+               goto done;
+       }
+
        /*
         * Clear this page out of any CPU caches for coherent swap-in/out. Note
         * that thanks to write = false in this call and us not setting any gpu
@@ -435,22 +687,21 @@ static int do_switch(struct i915_hw_context *to)
         * XXX: We need a real interface to do this instead of trickery.
         */
        ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
-       if (ret) {
-               i915_gem_object_unpin(to->obj);
-               return ret;
-       }
+       if (ret)
+               goto unpin_out;
 
-       if (!to->obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+       if (!to->obj->has_global_gtt_mapping) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+                                                          &dev_priv->gtt.base);
+               vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+       }
 
-       if (!to->is_initialized || is_default_context(to))
+       if (!to->is_initialized || i915_gem_context_is_default(to))
                hw_flags |= MI_RESTORE_INHIBIT;
 
        ret = mi_set_context(ring, to, hw_flags);
-       if (ret) {
-               i915_gem_object_unpin(to->obj);
-               return ret;
-       }
+       if (ret)
+               goto unpin_out;
 
        for (i = 0; i < MAX_L3_SLICES; i++) {
                if (!(to->remap_slice & (1<<i)))
@@ -484,22 +735,30 @@ static int do_switch(struct i915_hw_context *to)
                BUG_ON(from->obj->ring != ring);
 
                /* obj is kept alive until the next request by its active ref */
-               i915_gem_object_unpin(from->obj);
+               i915_gem_object_ggtt_unpin(from->obj);
                i915_gem_context_unreference(from);
        }
 
+       to->is_initialized = true;
+
+done:
        i915_gem_context_reference(to);
        ring->last_context = to;
-       to->is_initialized = true;
+       to->last_ring = ring;
 
        return 0;
+
+unpin_out:
+       if (ring->id == RCS)
+               i915_gem_object_ggtt_unpin(to->obj);
+       return ret;
 }
 
 /**
  * i915_switch_context() - perform a GPU context switch.
  * @ring: ring for which we'll execute the context switch
  * @file_priv: file_priv associated with the context, may be NULL
- * @id: context id number
+ * @to: the context to switch to
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 on create and destroy. If the context is in use by the GPU,
@@ -508,31 +767,19 @@ static int do_switch(struct i915_hw_context *to)
  */
 int i915_switch_context(struct intel_ring_buffer *ring,
                        struct drm_file *file,
-                       int to_id)
+                       struct i915_hw_context *to)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct i915_hw_context *to;
-
-       if (!HAS_HW_CONTEXTS(ring->dev))
-               return 0;
 
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-       if (ring != &dev_priv->ring[RCS])
-               return 0;
+       BUG_ON(file && to == NULL);
 
-       if (to_id == DEFAULT_CONTEXT_ID) {
-               to = ring->default_context;
-       } else {
-               if (file == NULL)
-                       return -EINVAL;
-
-               to = i915_gem_context_get(file->driver_priv, to_id);
-               if (to == NULL)
-                       return -ENOENT;
-       }
+       /* We have the fake context, but don't support switching. */
+       if (!HAS_HW_CONTEXTS(ring->dev))
+               return 0;
 
-       return do_switch(to);
+       return do_switch(ring, to);
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -543,9 +790,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        struct i915_hw_context *ctx;
        int ret;
 
-       if (!(dev->driver->driver_features & DRIVER_GEM))
-               return -ENODEV;
-
        if (!HAS_HW_CONTEXTS(dev))
                return -ENODEV;
 
@@ -553,7 +797,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       ctx = create_hw_context(dev, file_priv);
+       ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
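
At the uapi level context creation is unchanged; the ioctl simply hands back a context that now carries its own VM where full PPGTT is available. A minimal, hypothetical userspace caller via libdrm (a sketch with error handling trimmed; the fallback to id 0, the default context, follows this patch's DEFAULT_CONTEXT_ID handling):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	static uint32_t create_ctx(int fd)
	{
		struct drm_i915_gem_context_create create;

		memset(&create, 0, sizeof(create));
		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
			return 0;	/* fall back: default context id */
		return create.ctx_id;
	}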
@@ -572,17 +816,17 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
        struct i915_hw_context *ctx;
        int ret;
 
-       if (!(dev->driver->driver_features & DRIVER_GEM))
-               return -ENODEV;
+       if (args->ctx_id == DEFAULT_CONTEXT_ID)
+               return -ENOENT;
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
        ctx = i915_gem_context_get(file_priv, args->ctx_id);
-       if (!ctx) {
+       if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
-               return -ENOENT;
+               return PTR_ERR(ctx);
        }
 
        idr_remove(&ctx->file_priv->context_idr, ctx->id);
index 2ca280f9ee53e3f6f6422fcc3605fa86b2f52afa..8a78f7885cba887ed3dc8a36534b9368062f0467 100644 (file)
@@ -36,7 +36,7 @@
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
-       if (vma->obj->pin_count)
+       if (vma->pin_count)
                return false;
 
        if (WARN_ON(!list_empty(&vma->exec_list)))
@@ -46,10 +46,29 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
        return drm_mm_scan_add_block(&vma->node);
 }
 
+/**
+ * i915_gem_evict_something - Evict vmas to make room for binding a new one
+ * @dev: drm_device
+ * @vm: address space to evict from
+ * @size: size of the desired free space
+ * @alignment: alignment constraint of the desired free space
+ * @cache_level: cache_level for the desired space
+ * @mappable: whether the free space must be mappable
+ * @nonblocking: whether evicting active objects is allowed or not
+ *
+ * This function will try to evict vmas until a free space satisfying the
+ * requirements is found. Callers must check first whether any such hole exists
+ * already before calling this function.
+ *
+ * This function is used by the object/vma binding code.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
+ */
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
                         int min_size, unsigned alignment, unsigned cache_level,
-                        bool mappable, bool nonblocking)
+                        unsigned flags)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
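
A hypothetical caller in the binding code, showing the old (mappable, nonblocking) bool pair folded into one flags word (sketch only, names as introduced by this patch):

	ret = i915_gem_evict_something(dev, vm, min_size, alignment, cache_level,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret == -ENOSPC)
		;	/* nothing could be evicted without blocking */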
@@ -57,7 +76,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
        int ret = 0;
        int pass = 0;
 
-       trace_i915_gem_evict(dev, min_size, alignment, mappable);
+       trace_i915_gem_evict(dev, min_size, alignment, flags);
 
        /*
         * The goal is to evict objects and amalgamate space in LRU order.
@@ -83,7 +102,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
         */
 
        INIT_LIST_HEAD(&unwind_list);
-       if (mappable) {
+       if (flags & PIN_MAPPABLE) {
                BUG_ON(!i915_is_ggtt(vm));
                drm_mm_init_scan_with_range(&vm->mm, min_size,
                                            alignment, cache_level, 0,
@@ -98,7 +117,7 @@ search_again:
                        goto found;
        }
 
-       if (nonblocking)
+       if (flags & PIN_NONBLOCK)
                goto none;
 
        /* Now merge in the soon-to-be-expired objects... */
@@ -122,7 +141,7 @@ none:
        /* Can we unpin some objects such as idle hw contexts,
         * or pending flips?
         */
-       if (nonblocking)
+       if (flags & PIN_NONBLOCK)
                return -ENOSPC;
 
        /* Only idle the GPU and repeat the search once */
@@ -177,19 +196,19 @@ found:
 }
 
 /**
- * i915_gem_evict_vm - Try to free up VM space
+ * i915_gem_evict_vm - Evict all idle vmas from a vm
  *
- * @vm: Address space to evict from
+ * @vm: Address space to cleanse
  * @do_idle: Boolean directing whether to idle first.
  *
- * VM eviction is about freeing up virtual address space. If one wants fine
- * grained eviction, they should see evict something for more details. In terms
- * of freeing up actual system memory, this function may not accomplish the
- * desired result. An object may be shared in multiple address space, and this
- * function will not assert those objects be freed.
+ * This function evicts all idle vmas from a vm. If all unpinned vmas should be
+ * evicted, @do_idle needs to be set to true.
  *
- * Using do_idle will result in a more complete eviction because it retires, and
- * inactivates current BOs.
+ * This is used by the execbuf code as a last-ditch effort to defragment the
+ * address space.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
  */
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 {
@@ -207,12 +226,20 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
        }
 
        list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
-               if (vma->obj->pin_count == 0)
+               if (vma->pin_count == 0)
                        WARN_ON(i915_vma_unbind(vma));
 
        return 0;
 }
 
+/**
+ * i915_gem_evict_everything - Try to evict all objects
+ * @dev: Device to evict objects for
+ *
+ * This functions tries to evict all gem objects from all address spaces. Used
+ * by the shrinker as a last-ditch effort and for suspend, before releasing the
+ * backing storage of all unbound objects.
+ */
 int
 i915_gem_evict_everything(struct drm_device *dev)
 {
index d269ecf46e264cbaaef6c9e1db6ddcfb57f1424d..3851a1b1dc88de14461f34cdeefaaf2469a77e95 100644 (file)
@@ -91,6 +91,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
               struct i915_address_space *vm,
               struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = vm->dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct list_head objects;
        int i, ret;
@@ -125,6 +126,20 @@ eb_lookup_vmas(struct eb_vmas *eb,
        i = 0;
        while (!list_empty(&objects)) {
                struct i915_vma *vma;
+               struct i915_address_space *bind_vm = vm;
+
+               if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
+                   USES_FULL_PPGTT(vm->dev)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               /* If we have secure dispatch, or the userspace assures us that
+                * they know what they're doing, use the GGTT VM.
+                */
+               if (((args->flags & I915_EXEC_SECURE) &&
+                   (i == (args->buffer_count - 1))))
+                       bind_vm = &dev_priv->gtt.base;
 
                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
@@ -138,7 +153,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
                 * from the (obj, vm) we don't run the risk of creating
                 * duplicated vmas for the same vm.
                 */
-               vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+               vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
                if (IS_ERR(vma)) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
@@ -217,7 +232,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
                i915_gem_object_unpin_fence(obj);
 
        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-               i915_gem_object_unpin(obj);
+               vma->pin_count--;
 
        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
@@ -327,8 +342,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_vmas *eb,
-                                  struct drm_i915_gem_relocation_entry *reloc,
-                                  struct i915_address_space *vm)
+                                  struct drm_i915_gem_relocation_entry *reloc)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
@@ -352,8 +366,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
            !target_i915_obj->has_global_gtt_mapping)) {
-               i915_gem_gtt_bind_object(target_i915_obj,
-                                        target_i915_obj->cache_level);
+               struct i915_vma *vma =
+                       list_first_entry(&target_i915_obj->vma_list,
+                                        typeof(*vma), vma_link);
+               vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
        }
 
        /* Validate that the target is in a valid r/w GPU domain */
@@ -451,8 +467,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                do {
                        u64 offset = r->presumed_offset;
 
-                       ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
-                                                                vma->vm);
+                       ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
                        if (ret)
                                return ret;
 
@@ -481,8 +496,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
        int i, ret;
 
        for (i = 0; i < entry->relocation_count; i++) {
-               ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
-                                                        vma->vm);
+               ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
                if (ret)
                        return ret;
        }
@@ -527,21 +541,26 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                                struct intel_ring_buffer *ring,
                                bool *need_reloc)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_object *obj = vma->obj;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-       bool need_fence, need_mappable;
-       struct drm_i915_gem_object *obj = vma->obj;
+       bool need_fence;
+       unsigned flags;
        int ret;
 
+       flags = 0;
+
        need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
-       need_mappable = need_fence || need_reloc_mappable(vma);
+       if (need_fence || need_reloc_mappable(vma))
+               flags |= PIN_MAPPABLE;
 
-       ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
-                                 false);
+       if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+               flags |= PIN_GLOBAL;
+
+       ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
        if (ret)
                return ret;
 
@@ -560,14 +579,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                }
        }
 
-       /* Ensure ppgtt mapping exists if needed */
-       if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-               i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-                                      obj, obj->cache_level);
-
-               obj->has_aliasing_ppgtt_mapping = 1;
-       }
-
        if (entry->offset != vma->node.start) {
                entry->offset = vma->node.start;
                *need_reloc = true;
@@ -578,10 +589,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
        }
 
-       if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
-           !obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
-
        return 0;
 }
 
@@ -891,7 +898,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;
 
-               if (likely(!i915_prefault_disable)) {
+               if (likely(!i915.prefault_disable)) {
                        if (fault_in_multipages_readable(ptr, length))
                                return -EFAULT;
                }
@@ -900,22 +907,27 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
        return 0;
 }
 
-static int
+static struct i915_hw_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-                         const u32 ctx_id)
+                         struct intel_ring_buffer *ring, const u32 ctx_id)
 {
+       struct i915_hw_context *ctx = NULL;
        struct i915_ctx_hang_stats *hs;
 
-       hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
-       if (IS_ERR(hs))
-               return PTR_ERR(hs);
+       if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+               return ERR_PTR(-EINVAL);
 
+       ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+       if (IS_ERR(ctx))
+               return ctx;
+
+       hs = &ctx->hang_stats;
        if (hs->banned) {
                DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
-               return -EIO;
+               return ERR_PTR(-EIO);
        }
 
-       return 0;
+       return ctx;
 }
 
 static void
@@ -939,7 +951,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);
-                       if (obj->pin_count) /* check for potential scanout */
+                       /* check for potential scanout */
+                       if (i915_gem_obj_ggtt_bound(obj) &&
+                           i915_gem_obj_to_ggtt(obj)->pin_count)
                                intel_mark_fb_busy(obj, ring);
                }
 
@@ -989,16 +1003,17 @@ static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
-                      struct drm_i915_gem_exec_object2 *exec,
-                      struct i915_address_space *vm)
+                      struct drm_i915_gem_exec_object2 *exec)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
+       struct i915_hw_context *ctx;
+       struct i915_address_space *vm;
        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-       u32 exec_start, exec_len;
+       u32 exec_start = args->batch_start_offset, exec_len;
        u32 mask, flags;
        int ret, mode, i;
        bool need_relocs;
@@ -1020,41 +1035,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (args->flags & I915_EXEC_IS_PINNED)
                flags |= I915_DISPATCH_PINNED;
 
-       switch (args->flags & I915_EXEC_RING_MASK) {
-       case I915_EXEC_DEFAULT:
-       case I915_EXEC_RENDER:
-               ring = &dev_priv->ring[RCS];
-               break;
-       case I915_EXEC_BSD:
-               ring = &dev_priv->ring[VCS];
-               if (ctx_id != DEFAULT_CONTEXT_ID) {
-                       DRM_DEBUG("Ring %s doesn't support contexts\n",
-                                 ring->name);
-                       return -EPERM;
-               }
-               break;
-       case I915_EXEC_BLT:
-               ring = &dev_priv->ring[BCS];
-               if (ctx_id != DEFAULT_CONTEXT_ID) {
-                       DRM_DEBUG("Ring %s doesn't support contexts\n",
-                                 ring->name);
-                       return -EPERM;
-               }
-               break;
-       case I915_EXEC_VEBOX:
-               ring = &dev_priv->ring[VECS];
-               if (ctx_id != DEFAULT_CONTEXT_ID) {
-                       DRM_DEBUG("Ring %s doesn't support contexts\n",
-                                 ring->name);
-                       return -EPERM;
-               }
-               break;
-
-       default:
+       if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
                DRM_DEBUG("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }
+
+       if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
+               ring = &dev_priv->ring[RCS];
+       else
+               ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
+
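
The old switch statement collapses into index arithmetic because the I915_EXEC_* ring selectors are numbered contiguously from 1 (values as in i915_drm.h at the time):

	I915_EXEC_RENDER (1) -> dev_priv->ring[0] (RCS)
	I915_EXEC_BSD    (2) -> dev_priv->ring[1] (VCS)
	I915_EXEC_BLT    (3) -> dev_priv->ring[2] (BCS)
	I915_EXEC_VEBOX  (4) -> dev_priv->ring[3] (VECS)
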
        if (!intel_ring_initialized(ring)) {
                DRM_DEBUG("execbuf with invalid ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
@@ -1136,11 +1127,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }
 
-       ret = i915_gem_validate_context(dev, file, ctx_id);
-       if (ret) {
+       ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+       if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
+               ret = PTR_ERR(ctx);
                goto pre_mutex_err;
-       }
+       }
+
+       i915_gem_context_reference(ctx);
+
+       vm = ctx->vm;
+       if (!USES_FULL_PPGTT(dev))
+               vm = &dev_priv->gtt.base;
 
        eb = eb_create(args);
        if (eb == NULL) {
@@ -1184,17 +1182,46 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
+       if (i915_needs_cmd_parser(ring)) {
+               ret = i915_parse_cmds(ring,
+                                     batch_obj,
+                                     args->batch_start_offset,
+                                     file->is_master);
+               if (ret)
+                       goto err;
+
+               /*
+                * XXX: Actually do this when enabling batch copy...
+                *
+                * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+                * from MI_BATCH_BUFFER_START commands issued in the
+                * dispatch_execbuffer implementations. We specifically don't
+                * want that set when the command parser is enabled.
+                */
+       }
+
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
-       if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+       if (flags & I915_DISPATCH_SECURE &&
+           !batch_obj->has_global_gtt_mapping) {
+               /* When we have multiple VMs, we'll need to make sure that we
+                * allocate space first */
+               struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
+               BUG_ON(!vma);
+               vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+       }
+
+       if (flags & I915_DISPATCH_SECURE)
+               exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+       else
+               exec_start += i915_gem_obj_offset(batch_obj, vm);
 
        ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
        if (ret)
                goto err;
 
-       ret = i915_switch_context(ring, file, ctx_id);
+       ret = i915_switch_context(ring, file, ctx);
        if (ret)
                goto err;
 
@@ -1219,8 +1246,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        goto err;
        }
 
-       exec_start = i915_gem_obj_offset(batch_obj, vm) +
-               args->batch_start_offset;
+
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
@@ -1249,6 +1275,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
 err:
+       /* the request owns the ref now */
+       i915_gem_context_unreference(ctx);
        eb_destroy(eb);
 
        mutex_unlock(&dev->struct_mutex);
@@ -1270,7 +1298,6 @@ int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
        struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1326,8 +1353,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        exec2.flags = I915_EXEC_RENDER;
        i915_execbuffer2_set_context_id(exec2, 0);
 
-       ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
-                                    &dev_priv->gtt.base);
+       ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++)
@@ -1353,7 +1379,6 @@ int
 i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;
@@ -1384,8 +1409,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                return -EFAULT;
        }
 
-       ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
-                                    &dev_priv->gtt.base);
+       ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                ret = copy_to_user(to_user_ptr(args->buffers_ptr),
index 40a2b36b276baa774028b56ae60b6ae6c59e919d..63a6dc7a6bb6c57682018aeee2fbaa9c72a5ac45 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright © 2010 Daniel Vetter
+ * Copyright © 2011-2014 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  *
  */
 
+#include <linux/seq_file.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+bool intel_enable_ppgtt(struct drm_device *dev, bool full)
+{
+       if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+               return false;
+
+       if (i915.enable_ppgtt == 1 && full)
+               return false;
+
+#ifdef CONFIG_INTEL_IOMMU
+       /* Disable ppgtt on SNB if VT-d is on. */
+       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+               DRM_INFO("Disabling PPGTT because VT-d is on\n");
+               return false;
+       }
+#endif
+
+       /* Full ppgtt disabled by default for now due to issues. */
+       if (full)
+               return false; /* HAS_PPGTT(dev) */
+       else
+               return HAS_ALIASING_PPGTT(dev);
+}
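
With this helper the decision keys off the i915.enable_ppgtt module parameter instead of a build-time choice. As this version of the code stands (full PPGTT remains force-disabled regardless of the value):

	# boot-time selection, e.g. on the kernel command line
	i915.enable_ppgtt=0	# PPGTT disabled entirely
	i915.enable_ppgtt=1	# aliasing PPGTT only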
+
 #define GEN6_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
 typedef uint64_t gen8_gtt_pte_t;
@@ -63,13 +88,31 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 
 #define GEN8_PTES_PER_PAGE             (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 #define GEN8_PDES_PER_PAGE             (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
-#define GEN8_LEGACY_PDPS               4
+
+/* GEN8 legacy style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 |  11:0
+ * PDPE  |  PDE  |  PTE  | offset
+ * The difference compared to a normal x86 3 level page table is that the
+ * PDPEs are programmed via register.
+ */
+#define GEN8_PDPE_SHIFT                        30
+#define GEN8_PDPE_MASK                 0x3
+#define GEN8_PDE_SHIFT                 21
+#define GEN8_PDE_MASK                  0x1ff
+#define GEN8_PTE_SHIFT                 12
+#define GEN8_PTE_MASK                  0x1ff
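
A standalone illustration of the split described above (not part of the patch; plain userspace C reusing the shift/mask values just defined):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t start = 0xd2345678;		/* arbitrary example */
		unsigned pdpe = (start >> 30) & 0x3;	/* GEN8_PDPE_* */
		unsigned pde  = (start >> 21) & 0x1ff;	/* GEN8_PDE_* */
		unsigned pte  = (start >> 12) & 0x1ff;	/* GEN8_PTE_* */

		/* prints: pdpe=3 pde=145 pte=325 offset=0x678 */
		printf("pdpe=%u pde=%u pte=%u offset=0x%x\n",
		       pdpe, pde, pte, (unsigned)(start & 0xfff));
		return 0;
	}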
 
 #define PPAT_UNCACHED_INDEX            (_PAGE_PWT | _PAGE_PCD)
 #define PPAT_CACHED_PDE_INDEX          0 /* WB LLC */
 #define PPAT_CACHED_INDEX              _PAGE_PAT /* WB LLCeLLC */
 #define PPAT_DISPLAY_ELLC_INDEX                _PAGE_PCD /* WT eLLC */
 
+static void ppgtt_bind_vma(struct i915_vma *vma,
+                          enum i915_cache_level cache_level,
+                          u32 flags);
+static void ppgtt_unbind_vma(struct i915_vma *vma);
+static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
+
 static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
                                             enum i915_cache_level level,
                                             bool valid)
@@ -199,12 +242,19 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
-                          uint64_t val)
+                          uint64_t val, bool synchronous)
 {
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        int ret;
 
        BUG_ON(entry >= 4);
 
+       if (synchronous) {
+               I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
+               I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
+               return 0;
+       }
+
        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;
@@ -220,216 +270,357 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
        return 0;
 }
 
-static int gen8_ppgtt_enable(struct drm_device *dev)
+static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
+                         struct intel_ring_buffer *ring,
+                         bool synchronous)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       int i, j, ret;
+       int i, ret;
 
        /* bit of a hack to find the actual last used pd */
        int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
 
-       for_each_ring(ring, dev_priv, j) {
-               I915_WRITE(RING_MODE_GEN7(ring),
-                          _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-       }
-
        for (i = used_pd - 1; i >= 0; i--) {
                dma_addr_t addr = ppgtt->pd_dma_addr[i];
-               for_each_ring(ring, dev_priv, j) {
-                       ret = gen8_write_pdp(ring, i, addr);
-                       if (ret)
-                               goto err_out;
-               }
+               ret = gen8_write_pdp(ring, i, addr, synchronous);
+               if (ret)
+                       return ret;
        }
-       return 0;
 
-err_out:
-       for_each_ring(ring, dev_priv, j)
-               I915_WRITE(RING_MODE_GEN7(ring),
-                          _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
-       return ret;
+       return 0;
 }
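
gen8_mm_switch() threads the new synchronous flag down to gen8_write_pdp(), so callers that cannot emit ring commands (init/reset paths) fall back to the plain MMIO writes above. Both call styles through the switch_mm hook, sketched with this patch's names (the 'true' case is inferred from that fallback, not shown in these hunks):

	/* normal context switch, emitted on the ring */
	ret = ppgtt->switch_mm(ppgtt, ring, false);

	/* early enable / reset, before the ring is usable */
	ret = ppgtt->switch_mm(ppgtt, ring, true);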
 
 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
-                                  unsigned first_entry,
-                                  unsigned num_entries,
+                                  uint64_t start,
+                                  uint64_t length,
                                   bool use_scratch)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen8_gtt_pte_t *pt_vaddr, scratch_pte;
-       unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
-       unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
+       unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
+       unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
+       unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+       unsigned num_entries = length >> PAGE_SHIFT;
        unsigned last_pte, i;
 
        scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
                                      I915_CACHE_LLC, use_scratch);
 
        while (num_entries) {
-               struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
+               struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
 
-               last_pte = first_pte + num_entries;
+               last_pte = pte + num_entries;
                if (last_pte > GEN8_PTES_PER_PAGE)
                        last_pte = GEN8_PTES_PER_PAGE;
 
                pt_vaddr = kmap_atomic(page_table);
 
-               for (i = first_pte; i < last_pte; i++)
+               for (i = pte; i < last_pte; i++) {
                        pt_vaddr[i] = scratch_pte;
+                       num_entries--;
+               }
 
                kunmap_atomic(pt_vaddr);
 
-               num_entries -= last_pte - first_pte;
-               first_pte = 0;
-               act_pt++;
+               pte = 0;
+               if (++pde == GEN8_PDES_PER_PAGE) {
+                       pdpe++;
+                       pde = 0;
+               }
        }
 }
 
 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
-                                     unsigned first_entry,
+                                     uint64_t start,
                                      enum i915_cache_level cache_level)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen8_gtt_pte_t *pt_vaddr;
-       unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
-       unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
+       unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
+       unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
+       unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
        struct sg_page_iter sg_iter;
 
        pt_vaddr = NULL;
+
        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+               if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+                       break;
+
                if (pt_vaddr == NULL)
-                       pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+                       pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
 
-               pt_vaddr[act_pte] =
+               pt_vaddr[pte] =
                        gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
                                        cache_level, true);
-               if (++act_pte == GEN8_PTES_PER_PAGE) {
+               if (++pte == GEN8_PTES_PER_PAGE) {
                        kunmap_atomic(pt_vaddr);
                        pt_vaddr = NULL;
-                       act_pt++;
-                       act_pte = 0;
+                       if (++pde == GEN8_PDES_PER_PAGE) {
+                               pdpe++;
+                               pde = 0;
+                       }
+                       pte = 0;
                }
        }
        if (pt_vaddr)
                kunmap_atomic(pt_vaddr);
 }
 
+static void gen8_free_page_tables(struct page **pt_pages)
+{
+       int i;
+
+       if (pt_pages == NULL)
+               return;
+
+       for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
+               if (pt_pages[i])
+                       __free_pages(pt_pages[i], 0);
+}
+
+static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+{
+       int i;
+
+       for (i = 0; i < ppgtt->num_pd_pages; i++) {
+               gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
+               kfree(ppgtt->gen8_pt_pages[i]);
+               kfree(ppgtt->gen8_pt_dma_addr[i]);
+       }
+
+       __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+}
+
+static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+{
+       struct pci_dev *hwdev = ppgtt->base.dev->pdev;
+       int i, j;
+
+       for (i = 0; i < ppgtt->num_pd_pages; i++) {
+               /* TODO: In the future we'll support sparse mappings, so this
+                * will have to change. */
+               if (!ppgtt->pd_dma_addr[i])
+                       continue;
+
+               pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+                              PCI_DMA_BIDIRECTIONAL);
+
+               for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+                       dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+                       if (addr)
+                               pci_unmap_page(hwdev, addr, PAGE_SIZE,
+                                              PCI_DMA_BIDIRECTIONAL);
+               }
+       }
+}
+
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
-       int i, j;
 
+       list_del(&vm->global_link);
        drm_mm_takedown(&vm->mm);
 
-       for (i = 0; i < ppgtt->num_pd_pages ; i++) {
-               if (ppgtt->pd_dma_addr[i]) {
-                       pci_unmap_page(ppgtt->base.dev->pdev,
-                                      ppgtt->pd_dma_addr[i],
-                                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       gen8_ppgtt_unmap_pages(ppgtt);
+       gen8_ppgtt_free(ppgtt);
+}
+
+static struct page **__gen8_alloc_page_tables(void)
+{
+       struct page **pt_pages;
+       int i;
 
-                       for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-                               dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
-                               if (addr)
-                                       pci_unmap_page(ppgtt->base.dev->pdev,
-                                                      addr,
-                                                      PAGE_SIZE,
-                                                      PCI_DMA_BIDIRECTIONAL);
+       pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
+       if (!pt_pages)
+               return ERR_PTR(-ENOMEM);
 
-                       }
-               }
-               kfree(ppgtt->gen8_pt_dma_addr[i]);
+       for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
+               pt_pages[i] = alloc_page(GFP_KERNEL);
+               if (!pt_pages[i])
+                       goto bail;
        }
 
-       __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
-       __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+       return pt_pages;
+
+bail:
+       gen8_free_page_tables(pt_pages);
+       kfree(pt_pages);
+       return ERR_PTR(-ENOMEM);
 }
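
__gen8_alloc_page_tables() signals failure by encoding an errno in the returned pointer, so callers get both results through one return value. A userspace model of that kernel idiom — ERR_PTR/IS_ERR/PTR_ERR below are local stand-ins, not the <linux/err.h> originals:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Local stand-ins for the kernel's <linux/err.h> helpers */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-4095;
}

static void *alloc_table(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);        /* errno rides in the pointer */
        return calloc(512, sizeof(void *));
}

int main(void)
{
        void *t = alloc_table(1);

        if (IS_ERR(t))
                printf("allocation failed: %ld\n", PTR_ERR(t)); /* -12 */
        return 0;
}
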
 
-/**
- * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
- * net effect resembling a 2-level page table in normal x86 terms. Each PDP
- * represents 1GB of memory
- * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
- *
- * TODO: Do something with the size parameter
- **/
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
+                                          const int max_pdp)
 {
-       struct page *pt_pages;
-       int i, j, ret = -ENOMEM;
-       const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
-       const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+       struct page **pt_pages[GEN8_LEGACY_PDPS];
+       int i, ret;
 
-       if (size % (1<<30))
-               DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+       for (i = 0; i < max_pdp; i++) {
+               pt_pages[i] = __gen8_alloc_page_tables();
+               if (IS_ERR(pt_pages[i])) {
+                       ret = PTR_ERR(pt_pages[i]);
+                       goto unwind_out;
+               }
+       }
 
-       /* FIXME: split allocation into smaller pieces. For now we only ever do
-        * this once, but with full PPGTT, the multiple contiguous allocations
-        * will be bad.
+       /* NB: Avoid touching gen8_pt_pages until last, to keep the allocation
+        * "atomic" for cleanup purposes.
         */
+       for (i = 0; i < max_pdp; i++)
+               ppgtt->gen8_pt_pages[i] = pt_pages[i];
+
+       return 0;
+
+unwind_out:
+       while (i--) {
+               gen8_free_page_tables(pt_pages[i]);
+               kfree(pt_pages[i]);
+       }
+
+       return ret;
+}
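
Note how the allocation above stays "atomic": results are staged in a local array and only copied into ppgtt->gen8_pt_pages once every step has succeeded, so a mid-loop failure unwinds without the structure ever holding half-initialized state. The bare pattern, as a hypothetical standalone sketch:

#include <stdlib.h>

#define NR 4

/* Allocate all-or-nothing: publish into *out only after every step worked. */
static int alloc_all(char **out)
{
        char *tmp[NR];
        int i;

        for (i = 0; i < NR; i++) {
                tmp[i] = malloc(64);
                if (!tmp[i])
                        goto unwind_out;
        }

        for (i = 0; i < NR; i++)        /* commit: nothing can fail past here */
                out[i] = tmp[i];
        return 0;

unwind_out:
        while (i--)                     /* free only what we actually got */
                free(tmp[i]);
        return -1;
}

int main(void)
{
        char *pages[NR];
        return alloc_all(pages) ? 1 : 0;        /* leaks on success; a sketch */
}
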
+
+static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+{
+       int i;
+
+       for (i = 0; i < ppgtt->num_pd_pages; i++) {
+               ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
+                                                    sizeof(dma_addr_t),
+                                                    GFP_KERNEL);
+               if (!ppgtt->gen8_pt_dma_addr[i])
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+                                               const int max_pdp)
+{
        ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
        if (!ppgtt->pd_pages)
                return -ENOMEM;
 
-       pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
-       if (!pt_pages) {
+       ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+       BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+
+       return 0;
+}
+
+static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
+                           const int max_pdp)
+{
+       int ret;
+
+       ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
+       if (ret)
+               return ret;
+
+       ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
+       if (ret) {
                __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
-               return -ENOMEM;
+               return ret;
        }
 
-       ppgtt->gen8_pt_pages = pt_pages;
-       ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
-       ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
        ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
-       ppgtt->enable = gen8_ppgtt_enable;
-       ppgtt->base.clear_range = gen8_ppgtt_clear_range;
-       ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
-       ppgtt->base.cleanup = gen8_ppgtt_cleanup;
-       ppgtt->base.start = 0;
-       ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
 
-       BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+       ret = gen8_ppgtt_allocate_dma(ppgtt);
+       if (ret)
+               gen8_ppgtt_free(ppgtt);
 
-       /*
-        * - Create a mapping for the page directories.
-        * - For each page directory:
-        *      allocate space for page table mappings.
-        *      map each page table
-        */
-       for (i = 0; i < max_pdp; i++) {
-               dma_addr_t temp;
-               temp = pci_map_page(ppgtt->base.dev->pdev,
-                                   &ppgtt->pd_pages[i], 0,
-                                   PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
-                       goto err_out;
+       return ret;
+}
 
-               ppgtt->pd_dma_addr[i] = temp;
+static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
+                                            const int pd)
+{
+       dma_addr_t pd_addr;
+       int ret;
 
-               ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
-               if (!ppgtt->gen8_pt_dma_addr[i])
-                       goto err_out;
+       pd_addr = pci_map_page(ppgtt->base.dev->pdev,
+                              &ppgtt->pd_pages[pd], 0,
+                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
-               for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-                       struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
-                       temp = pci_map_page(ppgtt->base.dev->pdev,
-                                           p, 0, PAGE_SIZE,
-                                           PCI_DMA_BIDIRECTIONAL);
+       ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
+       if (ret)
+               return ret;
+
+       ppgtt->pd_dma_addr[pd] = pd_addr;
 
-                       if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
-                               goto err_out;
+       return 0;
+}
 
-                       ppgtt->gen8_pt_dma_addr[i][j] = temp;
+static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
+                                       const int pd,
+                                       const int pt)
+{
+       dma_addr_t pt_addr;
+       struct page *p;
+       int ret;
+
+       p = ppgtt->gen8_pt_pages[pd][pt];
+       pt_addr = pci_map_page(ppgtt->base.dev->pdev,
+                              p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
+       if (ret)
+               return ret;
+
+       ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+
+       return 0;
+}
+
+/**
+ * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP
+ * registers, with a net effect resembling a 2-level page table in normal x86
+ * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
+ * legacy 32b address space.
+ *
+ * FIXME: split allocation into smaller pieces. For now we only ever do this
+ * once, but with full PPGTT, the multiple contiguous allocations will be bad.
+ * TODO: Do something with the size parameter
+ */
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+{
+       const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
+       const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+       int i, j, ret;
+
+       if (size % (1<<30))
+               DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+
+       /* 1. Do all our allocations for page directories and page tables. */
+       ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+       if (ret)
+               return ret;
+
+       /*
+        * 2. Create DMA mappings for the page directories and page tables.
+        */
+       for (i = 0; i < max_pdp; i++) {
+               ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
+               if (ret)
+                       goto bail;
+
+               for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+                       ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
+                       if (ret)
+                               goto bail;
                }
        }
 
-       /* For now, the PPGTT helper functions all require that the PDEs are
+       /*
+        * 3. Map all the page directory entries to point to the page tables
+        * we've allocated.
+        *
+        * For now, the PPGTT helper functions all require that the PDEs are
         * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
-        * will never need to touch the PDEs again */
+        * will never need to touch the PDEs again.
+        */
        for (i = 0; i < max_pdp; i++) {
                gen8_ppgtt_pde_t *pd_vaddr;
                pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
@@ -441,23 +632,85 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
                kunmap_atomic(pd_vaddr);
        }
 
-       ppgtt->base.clear_range(&ppgtt->base, 0,
-                               ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
-                               true);
+       ppgtt->enable = gen8_ppgtt_enable;
+       ppgtt->switch_mm = gen8_mm_switch;
+       ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+       ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+       ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+       ppgtt->base.start = 0;
+       ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
+
+       ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
        DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
                         ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
        DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
-                        ppgtt->num_pt_pages,
-                        (ppgtt->num_pt_pages - num_pt_pages) +
-                        size % (1<<30));
+                        ppgtt->num_pd_entries,
+                        (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
        return 0;
 
-err_out:
-       ppgtt->base.cleanup(&ppgtt->base);
+bail:
+       gen8_ppgtt_unmap_pages(ppgtt);
+       gen8_ppgtt_free(ppgtt);
        return ret;
 }
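
The sizing arithmetic in gen8_ppgtt_init() can be checked in isolation: max_pdp page directories of 512 PDEs, each PDE fronting a 512-entry page table of 4K pages, so base.total comes out to exactly the 1GB-rounded request. A quick standalone verification, with the constants restated locally on those assumptions:

#include <stdint.h>
#include <stdio.h>

#define GEN8_PDES_PER_PAGE 512
#define GEN8_PTES_PER_PAGE 512
#define PAGE_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        uint64_t size = 2ULL << 30;                     /* request 2GB */
        int max_pdp = DIV_ROUND_UP(size, 1ULL << 30);   /* 2 PDPs */
        int num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
        uint64_t total = (uint64_t)num_pd_entries *
                         GEN8_PTES_PER_PAGE * PAGE_SIZE;

        printf("max_pdp=%d pd_entries=%d total=%lluGB\n",
               max_pdp, num_pd_entries,
               (unsigned long long)(total >> 30));      /* 2GB */
        return 0;
}
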
 
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+{
+       struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
+       struct i915_address_space *vm = &ppgtt->base;
+       gen6_gtt_pte_t __iomem *pd_addr;
+       gen6_gtt_pte_t scratch_pte;
+       uint32_t pd_entry;
+       int pte, pde;
+
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+
+       pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
+               ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+
+       seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
+                  ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+       for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
+               u32 expected;
+               gen6_gtt_pte_t *pt_vaddr;
+               dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+               pd_entry = readl(pd_addr + pde);
+               expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
+
+               if (pd_entry != expected)
+                       seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+                                  pde,
+                                  pd_entry,
+                                  expected);
+               seq_printf(m, "\tPDE: %x\n", pd_entry);
+
+               pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+               for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
+                       unsigned long va =
+                               (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
+                               (pte * PAGE_SIZE);
+                       int i;
+                       bool found = false;
+                       for (i = 0; i < 4; i++)
+                               if (pt_vaddr[pte + i] != scratch_pte)
+                                       found = true;
+                       if (!found)
+                               continue;
+
+                       seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+                       for (i = 0; i < 4; i++) {
+                               if (pt_vaddr[pte + i] != scratch_pte)
+                                       seq_printf(m, " %08x", pt_vaddr[pte + i]);
+                               else
+                                       seq_puts(m, "  SCRATCH ");
+                       }
+                       seq_puts(m, "\n");
+               }
+               kunmap_atomic(pt_vaddr);
+       }
+}
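
gen6_dump_ppgtt() keeps its debugfs output short by scanning PTEs four at a time and printing a group only when something in it differs from the scratch PTE. The filtering logic on its own, with a small dummy in-memory table standing in for the kmap'd page:

#include <stdint.h>
#include <stdio.h>

#define NR_PTES 16      /* stand-in for a full page worth of PTEs */

int main(void)
{
        uint32_t scratch = 0xdeadbeef;
        uint32_t pt[NR_PTES];
        int pte, i;

        for (i = 0; i < NR_PTES; i++)   /* mostly-scratch table */
                pt[i] = scratch;
        pt[5] = 0x12345001;

        for (pte = 0; pte < NR_PTES; pte += 4) {
                int found = 0;

                for (i = 0; i < 4; i++)
                        if (pt[pte + i] != scratch)
                                found = 1;
                if (!found)             /* skip all-scratch groups entirely */
                        continue;

                printf("[%04d]:", pte);
                for (i = 0; i < 4; i++) {
                        if (pt[pte + i] != scratch)
                                printf(" %08x", pt[pte + i]);
                        else
                                printf("  SCRATCH ");
                }
                printf("\n");
        }
        return 0;
}
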
+
 static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 {
        struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@@ -480,73 +733,235 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
        readl(pd_addr);
 }
 
-static int gen6_ppgtt_enable(struct drm_device *dev)
+static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t pd_offset;
+       BUG_ON(ppgtt->pd_offset & 0x3f);
+
+       return (ppgtt->pd_offset / 64) << 16;
+}
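
get_pd_offset() packs the page directory's GGTT byte offset into the PP_DIR_BASE register format: the offset must be 64-byte aligned, is counted in cachelines, and lands in bits 31:16. A worked example (the offset value itself is illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t get_pd_offset(uint32_t pd_offset_bytes)
{
        assert((pd_offset_bytes & 0x3f) == 0);  /* must be cacheline aligned */
        return (pd_offset_bytes / 64) << 16;    /* cachelines, bits 31:16 */
}

int main(void)
{
        /* A PD at GGTT byte offset 0x1ff000 is 0x7fc0 cachelines in */
        printf("PP_DIR_BASE = 0x%08x\n", get_pd_offset(0x1ff000)); /* 0x7fc00000 */
        return 0;
}
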
+
+static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
+                        struct intel_ring_buffer *ring,
+                        bool synchronous)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       /* If we're in reset, we can assume the GPU is sufficiently idle to
+        * manually frob these bits. Ideally we could use the ring functions,
+        * except our error handling makes it quite difficult (can't use
+        * intel_ring_begin, ring->flush, or intel_ring_advance)
+        *
+        * FIXME: We should try not to special case reset
+        */
+       if (synchronous ||
+           i915_reset_in_progress(&dev_priv->gpu_error)) {
+               WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+               I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+               POSTING_READ(RING_PP_DIR_BASE(ring));
+               return 0;
+       }
+
+       /* NB: TLBs must be flushed and invalidated before a switch */
+       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       if (ret)
+               return ret;
+
+       ret = intel_ring_begin(ring, 6);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+       intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+       intel_ring_emit(ring, PP_DIR_DCLV_2G);
+       intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+       intel_ring_emit(ring, get_pd_offset(ppgtt));
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+
+       return 0;
+}
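
The six dwords emitted above form a single MI_LOAD_REGISTER_IMM batch: a header announcing two register writes, two (register, value) pairs for PP_DIR_DCLV and PP_DIR_BASE, and an MI_NOOP pad to keep the dword count even. Laid out as data — the encodings below match my reading of i915_reg.h for the render ring, but treat them as illustrative:

#include <stdio.h>
#include <stdint.h>

/* Encodings per my reading of i915_reg.h; treat as illustrative */
#define MI_LOAD_REGISTER_IMM(n) ((0x22u << 23) | (2 * (n) - 1))
#define MI_NOOP                 0x0u
#define RING_PP_DIR_DCLV        0x2220u /* render ring mmio base + 0x220 */
#define RING_PP_DIR_BASE        0x2228u /* render ring mmio base + 0x228 */
#define PP_DIR_DCLV_2G          0xffffffffu

static const uint32_t mm_switch_batch[6] = {
        MI_LOAD_REGISTER_IMM(2),        /* header: 2 register writes follow */
        RING_PP_DIR_DCLV, PP_DIR_DCLV_2G,
        RING_PP_DIR_BASE, 0x7fc00000u,  /* a get_pd_offset() result */
        MI_NOOP,                        /* pad to an even dword count */
};

int main(void)
{
        int i;

        for (i = 0; i < 6; i++)
                printf("dword %d: 0x%08x\n", i, mm_switch_batch[i]);
        return 0;
}
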
+
+static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
+                         struct intel_ring_buffer *ring,
+                         bool synchronous)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       /* If we're in reset, we can assume the GPU is sufficiently idle to
+        * manually frob these bits. Ideally we could use the ring functions,
+        * except our error handling makes it quite difficult (can't use
+        * intel_ring_begin, ring->flush, or intel_ring_advance)
+        *
+        * FIXME: We should try not to special case reset
+        */
+       if (synchronous ||
+           i915_reset_in_progress(&dev_priv->gpu_error)) {
+               WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+               I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+               POSTING_READ(RING_PP_DIR_BASE(ring));
+               return 0;
+       }
+
+       /* NB: TLBs must be flushed and invalidated before a switch */
+       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       if (ret)
+               return ret;
+
+       ret = intel_ring_begin(ring, 6);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+       intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+       intel_ring_emit(ring, PP_DIR_DCLV_2G);
+       intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+       intel_ring_emit(ring, get_pd_offset(ppgtt));
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+
+       /* XXX: RCS is the only one to auto invalidate the TLBs? */
+       if (ring->id != RCS) {
+               ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
+                         struct intel_ring_buffer *ring,
+                         bool synchronous)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!synchronous)
+               return 0;
+
+       I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+       I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+
+       POSTING_READ(RING_PP_DIR_DCLV(ring));
+
+       return 0;
+}
+
+static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       int i;
+       int j, ret;
 
-       BUG_ON(ppgtt->pd_offset & 0x3f);
+       for_each_ring(ring, dev_priv, j) {
+               I915_WRITE(RING_MODE_GEN7(ring),
+                          _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
-       gen6_write_pdes(ppgtt);
+               /* We promise to do a switch later with FULL PPGTT. If this is
+                * aliasing, this is the one and only switch we'll do */
+               if (USES_FULL_PPGTT(dev))
+                       continue;
 
-       pd_offset = ppgtt->pd_offset;
-       pd_offset /= 64; /* in cachelines, */
-       pd_offset <<= 16;
+               ret = ppgtt->switch_mm(ppgtt, ring, true);
+               if (ret)
+                       goto err_out;
+       }
 
-       if (INTEL_INFO(dev)->gen == 6) {
-               uint32_t ecochk, gab_ctl, ecobits;
+       return 0;
 
-               ecobits = I915_READ(GAC_ECO_BITS);
-               I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
-                                        ECOBITS_PPGTT_CACHE64B);
+err_out:
+       for_each_ring(ring, dev_priv, j)
+               I915_WRITE(RING_MODE_GEN7(ring),
+                          _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+       return ret;
+}
 
-               gab_ctl = I915_READ(GAB_CTL);
-               I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       uint32_t ecochk, ecobits;
+       int i;
 
-               ecochk = I915_READ(GAM_ECOCHK);
-               I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
-                                      ECOCHK_PPGTT_CACHE64B);
-               I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-       } else if (INTEL_INFO(dev)->gen >= 7) {
-               uint32_t ecochk, ecobits;
+       ecobits = I915_READ(GAC_ECO_BITS);
+       I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
 
-               ecobits = I915_READ(GAC_ECO_BITS);
-               I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+       ecochk = I915_READ(GAM_ECOCHK);
+       if (IS_HASWELL(dev)) {
+               ecochk |= ECOCHK_PPGTT_WB_HSW;
+       } else {
+               ecochk |= ECOCHK_PPGTT_LLC_IVB;
+               ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+       }
+       I915_WRITE(GAM_ECOCHK, ecochk);
 
-               ecochk = I915_READ(GAM_ECOCHK);
-               if (IS_HASWELL(dev)) {
-                       ecochk |= ECOCHK_PPGTT_WB_HSW;
-               } else {
-                       ecochk |= ECOCHK_PPGTT_LLC_IVB;
-                       ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
-               }
-               I915_WRITE(GAM_ECOCHK, ecochk);
+       for_each_ring(ring, dev_priv, i) {
+               int ret;
                /* GFX_MODE is per-ring on gen7+ */
+               I915_WRITE(RING_MODE_GEN7(ring),
+                          _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+               /* We promise to do a switch later with FULL PPGTT. If this is
+                * aliasing, this is the one and only switch we'll do */
+               if (USES_FULL_PPGTT(dev))
+                       continue;
+
+               ret = ppgtt->switch_mm(ppgtt, ring, true);
+               if (ret)
+                       return ret;
        }
 
-       for_each_ring(ring, dev_priv, i) {
-               if (INTEL_INFO(dev)->gen >= 7)
-                       I915_WRITE(RING_MODE_GEN7(ring),
-                                  _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+       return 0;
+}
 
-               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-               I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       uint32_t ecochk, gab_ctl, ecobits;
+       int i;
+
+       ecobits = I915_READ(GAC_ECO_BITS);
+       I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
+                  ECOBITS_PPGTT_CACHE64B);
+
+       gab_ctl = I915_READ(GAB_CTL);
+       I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+       ecochk = I915_READ(GAM_ECOCHK);
+       I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+
+       I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+       for_each_ring(ring, dev_priv, i) {
+               int ret = ppgtt->switch_mm(ppgtt, ring, true);
+               if (ret)
+                       return ret;
        }
+
        return 0;
 }
 
 /* PPGTT support for Sandybridge/Gen6 and later */
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
-                                  unsigned first_entry,
-                                  unsigned num_entries,
+                                  uint64_t start,
+                                  uint64_t length,
                                   bool use_scratch)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+       unsigned first_entry = start >> PAGE_SHIFT;
+       unsigned num_entries = length >> PAGE_SHIFT;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
@@ -573,12 +988,13 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 
 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
-                                     unsigned first_entry,
+                                     uint64_t start,
                                      enum i915_cache_level cache_level)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr;
+       unsigned first_entry = start >> PAGE_SHIFT;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        struct sg_page_iter sg_iter;
@@ -602,65 +1018,129 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                kunmap_atomic(pt_vaddr);
 }
 
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
        int i;
 
-       drm_mm_takedown(&ppgtt->base.mm);
-
        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
                        pci_unmap_page(ppgtt->base.dev->pdev,
                                       ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
+}
+
+static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
+{
+       int i;
 
        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++)
                __free_page(ppgtt->pt_pages[i]);
        kfree(ppgtt->pt_pages);
-       kfree(ppgtt);
 }
 
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+
+       list_del(&vm->global_link);
+       drm_mm_takedown(&ppgtt->base.mm);
+       drm_mm_remove_node(&ppgtt->node);
+
+       gen6_ppgtt_unmap_pages(ppgtt);
+       gen6_ppgtt_free(ppgtt);
+}
+
+static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
+{
+#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned first_pd_entry_in_global_pt;
-       int i;
-       int ret = -ENOMEM;
+       bool retried = false;
+       int ret;
 
-       /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
-        * entries. For aliasing ppgtt support we just steal them at the end for
-        * now. */
-       first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
+       /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+        * allocator works in address space sizes, so it's multiplied by page
+        * size. We allocate at the top of the GTT to avoid fragmentation.
+        */
+       BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+alloc:
+       ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+                                                 &ppgtt->node, GEN6_PD_SIZE,
+                                                 GEN6_PD_ALIGN, 0,
+                                                 0, dev_priv->gtt.base.total,
+                                                 DRM_MM_SEARCH_DEFAULT);
+       if (ret == -ENOSPC && !retried) {
+               ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+                                              GEN6_PD_SIZE, GEN6_PD_ALIGN,
+                                              I915_CACHE_NONE, 0);
+               if (ret)
+                       return ret;
+
+               retried = true;
+               goto alloc;
+       }
+
+       if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+               DRM_DEBUG("Forced to use aperture for PDEs\n");
 
-       ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
        ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
-       ppgtt->enable = gen6_ppgtt_enable;
-       ppgtt->base.clear_range = gen6_ppgtt_clear_range;
-       ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
-       ppgtt->base.cleanup = gen6_ppgtt_cleanup;
-       ppgtt->base.scratch = dev_priv->gtt.base.scratch;
-       ppgtt->base.start = 0;
-       ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+       return ret;
+}
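
The allocate/evict/retry flow above is the usual drm_mm pattern: on -ENOSPC, evict something from the address space and retry exactly once. The control flow in isolation, with the allocator and evictor stubbed out:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int try_insert(void)
{
        static int full = 1;            /* first attempt finds no space */
        return full-- > 0 ? -ENOSPC : 0;
}

static int evict_space(void)            /* pretend eviction succeeds */
{
        return 0;
}

static int alloc_pd_space(void)
{
        bool retried = false;
        int ret;

alloc:
        ret = try_insert();
        if (ret == -ENOSPC && !retried) {
                ret = evict_space();    /* make room... */
                if (ret)
                        return ret;
                retried = true;
                goto alloc;             /* ...and retry exactly once */
        }
        return ret;
}

int main(void)
{
        printf("alloc_pd_space() = %d\n", alloc_pd_space());    /* 0 */
        return 0;
}
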
+
+static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+       int i;
+
        ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
                                  GFP_KERNEL);
+
        if (!ppgtt->pt_pages)
                return -ENOMEM;
 
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
-               if (!ppgtt->pt_pages[i])
-                       goto err_pt_alloc;
+               if (!ppgtt->pt_pages[i]) {
+                       gen6_ppgtt_free(ppgtt);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+{
+       int ret;
+
+       ret = gen6_ppgtt_allocate_page_directories(ppgtt);
+       if (ret)
+               return ret;
+
+       ret = gen6_ppgtt_allocate_page_tables(ppgtt);
+       if (ret) {
+               drm_mm_remove_node(&ppgtt->node);
+               return ret;
        }
 
        ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
                                     GFP_KERNEL);
-       if (!ppgtt->pt_dma_addr)
-               goto err_pt_alloc;
+       if (!ppgtt->pt_dma_addr) {
+               drm_mm_remove_node(&ppgtt->node);
+               gen6_ppgtt_free(ppgtt);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       int i;
 
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;
@@ -669,46 +1149,69 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
                                       PCI_DMA_BIDIRECTIONAL);
 
                if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
-                       ret = -EIO;
-                       goto err_pd_pin;
-
+                       gen6_ppgtt_unmap_pages(ppgtt);
+                       return -EIO;
                }
+
                ppgtt->pt_dma_addr[i] = pt_addr;
        }
 
-       ppgtt->base.clear_range(&ppgtt->base, 0,
-                               ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
+       return 0;
+}
 
-       ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
 
-       return 0;
+       ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+       if (IS_GEN6(dev)) {
+               ppgtt->enable = gen6_ppgtt_enable;
+               ppgtt->switch_mm = gen6_mm_switch;
+       } else if (IS_HASWELL(dev)) {
+               ppgtt->enable = gen7_ppgtt_enable;
+               ppgtt->switch_mm = hsw_mm_switch;
+       } else if (IS_GEN7(dev)) {
+               ppgtt->enable = gen7_ppgtt_enable;
+               ppgtt->switch_mm = gen7_mm_switch;
+       } else
+               BUG();
 
-err_pd_pin:
-       if (ppgtt->pt_dma_addr) {
-               for (i--; i >= 0; i--)
-                       pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
-                                      4096, PCI_DMA_BIDIRECTIONAL);
-       }
-err_pt_alloc:
-       kfree(ppgtt->pt_dma_addr);
-       for (i = 0; i < ppgtt->num_pd_entries; i++) {
-               if (ppgtt->pt_pages[i])
-                       __free_page(ppgtt->pt_pages[i]);
+       ret = gen6_ppgtt_alloc(ppgtt);
+       if (ret)
+               return ret;
+
+       ret = gen6_ppgtt_setup_page_tables(ppgtt);
+       if (ret) {
+               gen6_ppgtt_free(ppgtt);
+               return ret;
        }
-       kfree(ppgtt->pt_pages);
 
-       return ret;
+       ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+       ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+       ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+       ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+       ppgtt->base.start = 0;
+       ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+       ppgtt->debug_dump = gen6_dump_ppgtt;
+
+       ppgtt->pd_offset =
+               ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
+
+       ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+
+       DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+                        ppgtt->node.size >> 20,
+                        ppgtt->node.start / PAGE_SIZE);
+
+       return 0;
 }
 
-static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_hw_ppgtt *ppgtt;
-       int ret;
-
-       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-       if (!ppgtt)
-               return -ENOMEM;
+       int ret = 0;
 
        ppgtt->base.dev = dev;
 
@@ -719,45 +1222,39 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
        else
                BUG();
 
-       if (ret)
-               kfree(ppgtt);
-       else {
-               dev_priv->mm.aliasing_ppgtt = ppgtt;
+       if (!ret) {
+               struct drm_i915_private *dev_priv = dev->dev_private;
+               kref_init(&ppgtt->ref);
                drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
                            ppgtt->base.total);
+               i915_init_vm(dev_priv, &ppgtt->base);
+               if (INTEL_INFO(dev)->gen < 8) {
+                       gen6_write_pdes(ppgtt);
+                       DRM_DEBUG("Adding PPGTT at offset %x\n",
+                                 ppgtt->pd_offset << 10);
+               }
        }
 
        return ret;
 }
 
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+static void
+ppgtt_bind_vma(struct i915_vma *vma,
+              enum i915_cache_level cache_level,
+              u32 flags)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-       if (!ppgtt)
-               return;
+       WARN_ON(flags);
 
-       ppgtt->base.cleanup(&ppgtt->base);
-       dev_priv->mm.aliasing_ppgtt = NULL;
+       vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+                               cache_level);
 }
 
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-                           struct drm_i915_gem_object *obj,
-                           enum i915_cache_level cache_level)
+static void ppgtt_unbind_vma(struct i915_vma *vma)
 {
-       ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
-                                  i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-                                  cache_level);
-}
-
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-                             struct drm_i915_gem_object *obj)
-{
-       ppgtt->base.clear_range(&ppgtt->base,
-                               i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-                               obj->base.size >> PAGE_SHIFT,
-                               true);
+       vma->vm->clear_range(vma->vm,
+                            vma->node.start,
+                            vma->obj->base.size,
+                            true);
 }
 
 extern int intel_iommu_gfx_mapped;
@@ -840,8 +1337,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
        i915_check_and_clear_faults(dev);
 
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-                                      dev_priv->gtt.base.start / PAGE_SIZE,
-                                      dev_priv->gtt.base.total / PAGE_SIZE,
+                                      dev_priv->gtt.base.start,
+                                      dev_priv->gtt.base.total,
                                       false);
 }
 
@@ -849,18 +1346,44 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
 
        i915_check_and_clear_faults(dev);
 
        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-                                      dev_priv->gtt.base.start / PAGE_SIZE,
-                                      dev_priv->gtt.base.total / PAGE_SIZE,
+                                      dev_priv->gtt.base.start,
+                                      dev_priv->gtt.base.total,
                                       true);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+                                                          &dev_priv->gtt.base);
+               if (!vma)
+                       continue;
+
                i915_gem_clflush_object(obj, obj->pin_display);
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+               /* The bind_vma code tries to be smart about tracking mappings.
+                * Unfortunately, above we've just wiped out the mappings
+                * without telling our object about it. So we need to fake it.
+                */
+               obj->has_global_gtt_mapping = 0;
+               vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+       }
+
+       if (INTEL_INFO(dev)->gen >= 8)
+               return;
+
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               /* TODO: Perhaps it shouldn't be gen6 specific */
+               if (i915_is_ggtt(vm)) {
+                       if (dev_priv->mm.aliasing_ppgtt)
+                               gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
+                       continue;
+               }
+
+               gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
        }
 
        i915_gem_chipset_flush(dev);
@@ -891,10 +1414,11 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
 
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
-                                    unsigned int first_entry,
+                                    uint64_t start,
                                     enum i915_cache_level level)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       unsigned first_entry = start >> PAGE_SHIFT;
        gen8_gtt_pte_t __iomem *gtt_entries =
                (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int i = 0;
@@ -936,10 +1460,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
  */
 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
-                                    unsigned int first_entry,
+                                    uint64_t start,
                                     enum i915_cache_level level)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       unsigned first_entry = start >> PAGE_SHIFT;
        gen6_gtt_pte_t __iomem *gtt_entries =
                (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int i = 0;
@@ -971,11 +1496,13 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 }
 
 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
-                                 unsigned int first_entry,
-                                 unsigned int num_entries,
+                                 uint64_t start,
+                                 uint64_t length,
                                  bool use_scratch)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       unsigned first_entry = start >> PAGE_SHIFT;
+       unsigned num_entries = length >> PAGE_SHIFT;
        gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -995,11 +1522,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 }
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
-                                 unsigned int first_entry,
-                                 unsigned int num_entries,
+                                 uint64_t start,
+                                 uint64_t length,
                                  bool use_scratch)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       unsigned first_entry = start >> PAGE_SHIFT;
+       unsigned num_entries = length >> PAGE_SHIFT;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -1017,53 +1546,103 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
        readl(gtt_base);
 }
 
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
-                                    struct sg_table *st,
-                                    unsigned int pg_start,
-                                    enum i915_cache_level cache_level)
+
+static void i915_ggtt_bind_vma(struct i915_vma *vma,
+                              enum i915_cache_level cache_level,
+                              u32 unused)
 {
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
 
-       intel_gtt_insert_sg_entries(st, pg_start, flags);
-
+       BUG_ON(!i915_is_ggtt(vma->vm));
+       intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+       vma->obj->has_global_gtt_mapping = 1;
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
-                                 unsigned int first_entry,
-                                 unsigned int num_entries,
+                                 uint64_t start,
+                                 uint64_t length,
                                  bool unused)
 {
+       unsigned first_entry = start >> PAGE_SHIFT;
+       unsigned num_entries = length >> PAGE_SHIFT;
        intel_gtt_clear_range(first_entry, num_entries);
 }
 
+static void i915_ggtt_unbind_vma(struct i915_vma *vma)
+{
+       const unsigned int first = vma->node.start >> PAGE_SHIFT;
+       const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
+
+       BUG_ON(!i915_is_ggtt(vma->vm));
+       vma->obj->has_global_gtt_mapping = 0;
+       intel_gtt_clear_range(first, size);
+}
 
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-                             enum i915_cache_level cache_level)
+static void ggtt_bind_vma(struct i915_vma *vma,
+                         enum i915_cache_level cache_level,
+                         u32 flags)
 {
-       struct drm_device *dev = obj->base.dev;
+       struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
+       struct drm_i915_gem_object *obj = vma->obj;
 
-       dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
-                                         entry,
-                                         cache_level);
+       /* If there is no aliasing PPGTT, or the caller needs a global mapping,
+        * or we have a global mapping already but the cacheability flags have
+        * changed, set the global PTEs.
+        *
+        * If there is an aliasing PPGTT it is anecdotally faster, so use that
+        * instead if none of the above hold true.
+        *
+        * NB: A global mapping should only be needed for special regions like
+        * "gtt mappable", SNB errata, or if specified via special execbuf
+        * flags. At all other times, the GPU will use the aliasing PPGTT.
+        */
+       if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
+               if (!obj->has_global_gtt_mapping ||
+                   (cache_level != obj->cache_level)) {
+                       vma->vm->insert_entries(vma->vm, obj->pages,
+                                               vma->node.start,
+                                               cache_level);
+                       obj->has_global_gtt_mapping = 1;
+               }
+       }
 
-       obj->has_global_gtt_mapping = 1;
+       if (dev_priv->mm.aliasing_ppgtt &&
+           (!obj->has_aliasing_ppgtt_mapping ||
+            (cache_level != obj->cache_level))) {
+               struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+               appgtt->base.insert_entries(&appgtt->base,
+                                           vma->obj->pages,
+                                           vma->node.start,
+                                           cache_level);
+               vma->obj->has_aliasing_ppgtt_mapping = 1;
+       }
 }
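
ggtt_bind_vma() makes two independent decisions: write global GTT PTEs when there is no aliasing PPGTT, a global mapping is explicitly requested, or the existing one is stale; and write aliasing-PPGTT PTEs whenever one exists and its mapping is missing or stale. The decision logic alone, with the PTE writes stubbed (the GLOBAL_BIND value is a placeholder):

#include <stdbool.h>
#include <stdio.h>

#define GLOBAL_BIND (1 << 0)    /* placeholder flag value */

struct obj {
        bool has_global_gtt_mapping;
        bool has_aliasing_ppgtt_mapping;
        int cache_level;
};

static void bind(struct obj *o, bool have_aliasing_ppgtt,
                 int cache_level, unsigned int flags)
{
        /* Global PTEs: needed if no aliasing PPGTT, forced, or stale. */
        if (!have_aliasing_ppgtt || (flags & GLOBAL_BIND)) {
                if (!o->has_global_gtt_mapping ||
                    cache_level != o->cache_level) {
                        printf("write global GTT PTEs\n");
                        o->has_global_gtt_mapping = true;
                }
        }

        /* Aliasing PTEs: anecdotally faster, used whenever available. */
        if (have_aliasing_ppgtt &&
            (!o->has_aliasing_ppgtt_mapping ||
             cache_level != o->cache_level)) {
                printf("write aliasing PPGTT PTEs\n");
                o->has_aliasing_ppgtt_mapping = true;
        }
        o->cache_level = cache_level;
}

int main(void)
{
        struct obj o = { 0 };

        bind(&o, true, 1, 0);           /* -> aliasing PTEs only */
        bind(&o, true, 1, GLOBAL_BIND); /* -> global PTEs added */
        return 0;
}
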
 
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-       struct drm_device *dev = obj->base.dev;
+       struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
-       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-                                      entry,
-                                      obj->base.size >> PAGE_SHIFT,
-                                      true);
+       struct drm_i915_gem_object *obj = vma->obj;
+
+       if (obj->has_global_gtt_mapping) {
+               vma->vm->clear_range(vma->vm,
+                                    vma->node.start,
+                                    obj->base.size,
+                                    true);
+               obj->has_global_gtt_mapping = 0;
+       }
 
-       obj->has_global_gtt_mapping = 0;
+       if (obj->has_aliasing_ppgtt_mapping) {
+               struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+               appgtt->base.clear_range(&appgtt->base,
+                                        vma->node.start,
+                                        obj->base.size,
+                                        true);
+               obj->has_aliasing_ppgtt_mapping = 0;
+       }
 }
 
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
@@ -1145,29 +1724,14 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 
        /* Clear any non-preallocated blocks */
        drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
-               const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
+               ggtt_vm->clear_range(ggtt_vm, hole_start,
+                                    hole_end - hole_start, true);
        }
 
        /* And finally clear the reserved guard page */
-       ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
-}
-
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
-       if (i915_enable_ppgtt >= 0)
-               return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
-       /* Disable ppgtt on SNB if VT-d is on. */
-       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-               return false;
-#endif
-
-       return true;
+       ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
 }
 
 void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -1178,26 +1742,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
        gtt_size = dev_priv->gtt.base.total;
        mappable_size = dev_priv->gtt.mappable_end;
 
-       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-               int ret;
-
-               if (INTEL_INFO(dev)->gen <= 7) {
-                       /* PPGTT pdes are stolen from global gtt ptes, so shrink the
-                        * aperture accordingly when using aliasing ppgtt. */
-                       gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
-               }
-
-               i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
-
-               ret = i915_gem_init_aliasing_ppgtt(dev);
-               if (!ret)
-                       return;
-
-               DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
-               drm_mm_takedown(&dev_priv->gtt.base.mm);
-               if (INTEL_INFO(dev)->gen < 8)
-                       gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
-       }
        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
 
@@ -1252,11 +1796,6 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
        bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
        if (bdw_gmch_ctl)
                bdw_gmch_ctl = 1 << bdw_gmch_ctl;
-       if (bdw_gmch_ctl > 4) {
-               WARN_ON(!i915_preliminary_hw_support);
-               return 4<<20;
-       }
-
        return bdw_gmch_ctl << 20;
 }
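
With the preliminary-hw cap removed, the GGMS decode is a pure power of two: a field value of n yields 2^n MB of GGTT (1 -> 2MB, 2 -> 4MB, 3 -> 8MB). Spelled out, with the shift/mask values (not visible in this hunk) assumed:

#include <stdio.h>

/* Field position assumed; this hunk only shows the decode itself */
#define BDW_GMCH_GGMS_SHIFT 8
#define BDW_GMCH_GGMS_MASK  0x3

static unsigned int gen8_gtt_size(unsigned int gmch_ctl)
{
        gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
        gmch_ctl &= BDW_GMCH_GGMS_MASK;
        if (gmch_ctl)
                gmch_ctl = 1 << gmch_ctl;
        return gmch_ctl << 20;          /* MB -> bytes */
}

int main(void)
{
        /* field 1 -> 2MB, 2 -> 4MB, 3 -> 8MB */
        printf("%uMB\n", gen8_gtt_size(3 << BDW_GMCH_GGMS_SHIFT) >> 20);
        return 0;
}
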
 
@@ -1438,7 +1977,6 @@ static int i915_gmch_probe(struct drm_device *dev,
 
        dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
        dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
-       dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 
        if (unlikely(dev_priv->gtt.do_idle_maps))
                DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -1493,3 +2031,62 @@ int i915_gem_gtt_init(struct drm_device *dev)
 
        return 0;
 }
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+                                             struct i915_address_space *vm)
+{
+       struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+       if (vma == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&vma->vma_link);
+       INIT_LIST_HEAD(&vma->mm_list);
+       INIT_LIST_HEAD(&vma->exec_list);
+       vma->vm = vm;
+       vma->obj = obj;
+
+       switch (INTEL_INFO(vm->dev)->gen) {
+       case 8:
+       case 7:
+       case 6:
+               if (i915_is_ggtt(vm)) {
+                       vma->unbind_vma = ggtt_unbind_vma;
+                       vma->bind_vma = ggtt_bind_vma;
+               } else {
+                       vma->unbind_vma = ppgtt_unbind_vma;
+                       vma->bind_vma = ppgtt_bind_vma;
+               }
+               break;
+       case 5:
+       case 4:
+       case 3:
+       case 2:
+               BUG_ON(!i915_is_ggtt(vm));
+               vma->unbind_vma = i915_ggtt_unbind_vma;
+               vma->bind_vma = i915_ggtt_bind_vma;
+               break;
+       default:
+               BUG();
+       }
+
+       /* Keep GGTT vmas first to make debug easier */
+       if (i915_is_ggtt(vm))
+               list_add(&vma->vma_link, &obj->vma_list);
+       else
+               list_add_tail(&vma->vma_link, &obj->vma_list);
+
+       return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+                                 struct i915_address_space *vm)
+{
+       struct i915_vma *vma;
+
+       vma = i915_gem_obj_to_vma(obj, vm);
+       if (!vma)
+               vma = __i915_gem_vma_create(obj, vm);
+
+       return vma;
+}
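
__i915_gem_vma_create() selects the bind/unbind implementations once, by hardware generation and address-space type, so later callers just invoke the stored function pointers without re-checking either. A minimal model of that dispatch, with the types cut down to essentials:

#include <stdio.h>

struct vma;
typedef void (*bind_fn)(struct vma *);

struct vma {
        int is_ggtt;
        bind_fn bind_vma;
};

static void ggtt_bind(struct vma *v)  { (void)v; printf("bind via global GTT\n"); }
static void ppgtt_bind(struct vma *v) { (void)v; printf("bind via PPGTT\n"); }

/* Choose the implementation once at creation, like __i915_gem_vma_create() */
static void vma_init(struct vma *v, int gen, int is_ggtt)
{
        v->is_ggtt = is_ggtt;
        v->bind_vma = (gen >= 6 && !is_ggtt) ? ppgtt_bind : ggtt_bind;
}

int main(void)
{
        struct vma v;

        vma_init(&v, 8, 0);
        v.bind_vma(&v);         /* -> "bind via PPGTT" */
        return 0;
}
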
index b1390534804888e46d4db065fcb0837814743e4c..eb993584aa6babbe3894594b928a8e5a47fb9fbb 100644 (file)
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       if (obj->pin_count || obj->framebuffer_references) {
+       if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
                drm_gem_object_unreference_unlocked(&obj->base);
                return -EBUSY;
        }
index 990cf8f43efda908ecb1565cb5e8a786efe68306..144a5e2bc7ef3245afb09518d69e5bc81c3c8432 100644 (file)
@@ -238,50 +238,61 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
 
 static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
                                  struct drm_device *dev,
-                                 struct drm_i915_error_state *error,
-                                 unsigned ring)
+                                 struct drm_i915_error_ring *ring)
 {
-       BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
-       if (!error->ring[ring].valid)
+       if (!ring->valid)
                return;
 
-       err_printf(m, "%s command stream:\n", ring_str(ring));
-       err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
-       err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
-       err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
-       err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
-       err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
-       err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
-       err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
+       err_printf(m, "  HEAD: 0x%08x\n", ring->head);
+       err_printf(m, "  TAIL: 0x%08x\n", ring->tail);
+       err_printf(m, "  CTL: 0x%08x\n", ring->ctl);
+       err_printf(m, "  HWS: 0x%08x\n", ring->hws);
+       err_printf(m, "  ACTHD: 0x%08x\n", ring->acthd);
+       err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
+       err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
+       err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
        if (INTEL_INFO(dev)->gen >= 4) {
-               err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr[ring]);
-               err_printf(m, "  BB_STATE: 0x%08x\n", error->bbstate[ring]);
-               err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+               err_printf(m, "  BBADDR: 0x%08llx\n", ring->bbaddr);
+               err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
+               err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
        }
-       err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
-       err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
+       err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
+       err_printf(m, "  FADDR: 0x%08x\n", ring->faddr);
        if (INTEL_INFO(dev)->gen >= 6) {
-               err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
-               err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+               err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
+               err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
                err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
-                          error->semaphore_mboxes[ring][0],
-                          error->semaphore_seqno[ring][0]);
+                          ring->semaphore_mboxes[0],
+                          ring->semaphore_seqno[0]);
                err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
-                          error->semaphore_mboxes[ring][1],
-                          error->semaphore_seqno[ring][1]);
+                          ring->semaphore_mboxes[1],
+                          ring->semaphore_seqno[1]);
                if (HAS_VEBOX(dev)) {
                        err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
-                                  error->semaphore_mboxes[ring][2],
-                                  error->semaphore_seqno[ring][2]);
+                                  ring->semaphore_mboxes[2],
+                                  ring->semaphore_seqno[2]);
                }
        }
-       err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
-       err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
-       err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
-       err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+       if (USES_PPGTT(dev)) {
+               err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+
+               if (INTEL_INFO(dev)->gen >= 8) {
+                       int i;
+                       for (i = 0; i < 4; i++)
+                               err_printf(m, "  PDP%d: 0x%016llx\n",
+                                          i, ring->vm_info.pdp[i]);
+               } else {
+                       err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
+                                  ring->vm_info.pp_dir_base);
+               }
+       }
+       err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
+       err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
+       err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
+       err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
        err_printf(m, "  hangcheck: %s [%d]\n",
-                  hangcheck_action_to_str(error->hangcheck_action[ring]),
-                  error->hangcheck_score[ring]);
+                  hangcheck_action_to_str(ring->hangcheck_action),
+                  ring->hangcheck_score);
 }
 
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -293,22 +304,54 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
        va_end(args);
 }
 
+static void print_error_obj(struct drm_i915_error_state_buf *m,
+                           struct drm_i915_error_object *obj)
+{
+       int page, offset, elt;
+
+       for (page = offset = 0; page < obj->page_count; page++) {
+               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+                       err_printf(m, "%08x :  %08x\n", offset,
+                                  obj->pages[page][elt]);
+                       offset += 4;
+               }
+       }
+}
+
 int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                            const struct i915_error_state_file_priv *error_priv)
 {
        struct drm_device *dev = error_priv->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error = error_priv->error;
-       int i, j, page, offset, elt;
+       int i, j, offset, elt;
+       int max_hangcheck_score;
 
        if (!error) {
                err_printf(m, "no error state collected\n");
                goto out;
        }
 
+       err_printf(m, "%s\n", error->error_msg);
        err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        err_printf(m, "Kernel: " UTS_RELEASE "\n");
+       max_hangcheck_score = 0;
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               if (error->ring[i].hangcheck_score > max_hangcheck_score)
+                       max_hangcheck_score = error->ring[i].hangcheck_score;
+       }
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               if (error->ring[i].hangcheck_score == max_hangcheck_score &&
+                   error->ring[i].pid != -1) {
+                       err_printf(m, "Active process (on ring %s): %s [%d]\n",
+                                  ring_str(i),
+                                  error->ring[i].comm,
+                                  error->ring[i].pid);
+               }
+       }
+       err_printf(m, "Reset count: %u\n", error->reset_count);
+       err_printf(m, "Suspend count: %u\n", error->suspend_count);
        err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
        err_printf(m, "EIR: 0x%08x\n", error->eir);
        err_printf(m, "IER: 0x%08x\n", error->ier);
@@ -333,8 +376,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        if (INTEL_INFO(dev)->gen == 7)
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++)
-               i915_ring_error_state(m, dev, error, i);
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               err_printf(m, "%s command stream:\n", ring_str(i));
+               i915_ring_error_state(m, dev, &error->ring[i]);
+       }
 
        if (error->active_bo)
                print_error_buffers(m, "Active",
@@ -349,18 +394,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                struct drm_i915_error_object *obj;
 
-               if ((obj = error->ring[i].batchbuffer)) {
-                       err_printf(m, "%s --- gtt_offset = 0x%08x\n",
-                                  dev_priv->ring[i].name,
+               obj = error->ring[i].batchbuffer;
+               if (obj) {
+                       err_puts(m, dev_priv->ring[i].name);
+                       if (error->ring[i].pid != -1)
+                               err_printf(m, " (submitted by %s [%d])",
+                                          error->ring[i].comm,
+                                          error->ring[i].pid);
+                       err_printf(m, " --- gtt_offset = 0x%08x\n",
                                   obj->gtt_offset);
-                       offset = 0;
-                       for (page = 0; page < obj->page_count; page++) {
-                               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-                                       err_printf(m, "%08x :  %08x\n", offset,
-                                                  obj->pages[page][elt]);
-                                       offset += 4;
-                               }
-                       }
+                       print_error_obj(m, obj);
+               }
+
+               obj = error->ring[i].wa_batchbuffer;
+               if (obj) {
+                       err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
+                                  dev_priv->ring[i].name, obj->gtt_offset);
+                       print_error_obj(m, obj);
                }
 
                if (error->ring[i].num_requests) {
@@ -379,14 +429,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                        err_printf(m, "%s --- ringbuffer = 0x%08x\n",
                                   dev_priv->ring[i].name,
                                   obj->gtt_offset);
+                       print_error_obj(m, obj);
+               }
+
+               if ((obj = error->ring[i].hws_page)) {
+                       err_printf(m, "%s --- HW Status = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  obj->gtt_offset);
                        offset = 0;
-                       for (page = 0; page < obj->page_count; page++) {
-                               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-                                       err_printf(m, "%08x :  %08x\n",
-                                                  offset,
-                                                  obj->pages[page][elt]);
-                                       offset += 4;
-                               }
+                       for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+                               err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+                                          offset,
+                                          obj->pages[0][elt],
+                                          obj->pages[0][elt+1],
+                                          obj->pages[0][elt+2],
+                                          obj->pages[0][elt+3]);
+                               offset += 16;
                        }
                }
 
@@ -472,6 +530,7 @@ static void i915_error_state_free(struct kref *error_ref)
        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                i915_error_object_free(error->ring[i].batchbuffer);
                i915_error_object_free(error->ring[i].ringbuffer);
+               i915_error_object_free(error->ring[i].hws_page);
                i915_error_object_free(error->ring[i].ctx);
                kfree(error->ring[i].requests);
        }
@@ -485,6 +544,7 @@ static void i915_error_state_free(struct kref *error_ref)
 static struct drm_i915_error_object *
 i915_error_object_create_sized(struct drm_i915_private *dev_priv,
                               struct drm_i915_gem_object *src,
+                              struct i915_address_space *vm,
                               const int num_pages)
 {
        struct drm_i915_error_object *dst;
@@ -498,7 +558,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
        if (dst == NULL)
                return NULL;
 
-       reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
+       reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
        for (i = 0; i < num_pages; i++) {
                unsigned long flags;
                void *d;
@@ -508,8 +568,10 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
                        goto unwind;
 
                local_irq_save(flags);
-               if (reloc_offset < dev_priv->gtt.mappable_end &&
-                   src->has_global_gtt_mapping) {
+               if (src->cache_level == I915_CACHE_NONE &&
+                   reloc_offset < dev_priv->gtt.mappable_end &&
+                   src->has_global_gtt_mapping &&
+                   i915_is_ggtt(vm)) {
                        void __iomem *s;
 
                        /* Simply ignore tiling or any overlapping fence.
@@ -559,8 +621,12 @@ unwind:
        kfree(dst);
        return NULL;
 }
-#define i915_error_object_create(dev_priv, src) \
-       i915_error_object_create_sized((dev_priv), (src), \
+#define i915_error_object_create(dev_priv, src, vm) \
+       i915_error_object_create_sized((dev_priv), (src), (vm), \
+                                      (src)->base.size>>PAGE_SHIFT)
+
+#define i915_error_ggtt_object_create(dev_priv, src) \
+       i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
                                       (src)->base.size>>PAGE_SHIFT)
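
The two wrappers differ only in which address space the copy is resolved
through; both usages appear further down in i915_gem_record_rings:

	/* per-context object: capture through the request's VM */
	i915_error_object_create(dev_priv, request->batch_obj, request->ctx->vm);

	/* globally mapped object: capture through the GGTT */
	i915_error_ggtt_object_create(dev_priv, ring->obj);
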
 
 static void capture_bo(struct drm_i915_error_buffer *err,
@@ -575,7 +641,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
        err->write_domain = obj->base.write_domain;
        err->fence_reg = obj->fence_reg;
        err->pinned = 0;
-       if (obj->pin_count > 0)
+       if (i915_gem_obj_is_pinned(obj))
                err->pinned = 1;
        if (obj->user_pin_count > 0)
                err->pinned = -1;
@@ -608,7 +674,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
        int i = 0;
 
        list_for_each_entry(obj, head, global_list) {
-               if (obj->pin_count == 0)
+               if (!i915_gem_obj_is_pinned(obj))
                        continue;
 
                capture_bo(err++, obj);
@@ -619,6 +685,39 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
        return i;
 }
 
+/* Generate a semi-unique error code. The code is not meant to have meaning;
+ * its only purpose is to try to prevent falsely duplicated bug reports by
+ * grossly estimating a GPU error state.
+ *
+ * TODO: ideally, hashing the batchbuffer would be a very nice way to identify
+ * the hang if we could strip the GTT offset information from it.
+ *
+ * In its current form it is only a small step better than a random number.
+ */
+static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
+                                        struct drm_i915_error_state *error,
+                                        int *ring_id)
+{
+       uint32_t error_code = 0;
+       int i;
+
+       /* IPEHR would be an ideal way to detect errors, as it's the gross
+        * measure of "the command that hung." However, it can also contain
+        * some very common synchronization commands which almost always
+        * appear in cases that are strictly a client bug. Mix in instdone
+        * to help differentiate those.
+        */
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
+                       if (ring_id)
+                               *ring_id = i;
+
+                       return error->ring[i].ipehr ^ error->ring[i].instdone;
+               }
+       }
+
+       return error_code;
+}
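
Concretely, the code is just the hung ring's IPEHR XORed with its INSTDONE,
so with illustrative (made-up) register values:

	/* ipehr = 0x0b160001, instdone = 0xffffffff (illustrative values) */
	ecode = 0x0b160001 ^ 0xffffffff;	/* = 0xf4e9fffe */
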
+
 static void i915_gem_record_fences(struct drm_device *dev,
                                   struct drm_i915_error_state *error)
 {
@@ -652,107 +751,112 @@ static void i915_gem_record_fences(struct drm_device *dev,
        }
 }
 
-static struct drm_i915_error_object *
-i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
-                            struct intel_ring_buffer *ring)
-{
-       struct i915_address_space *vm;
-       struct i915_vma *vma;
-       struct drm_i915_gem_object *obj;
-       u32 seqno;
-
-       if (!ring->get_seqno)
-               return NULL;
-
-       if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
-               u32 acthd = I915_READ(ACTHD);
-
-               if (WARN_ON(ring->id != RCS))
-                       return NULL;
-
-               obj = ring->scratch.obj;
-               if (obj != NULL &&
-                   acthd >= i915_gem_obj_ggtt_offset(obj) &&
-                   acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
-                       return i915_error_object_create(dev_priv, obj);
-       }
-
-       seqno = ring->get_seqno(ring, false);
-       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-               list_for_each_entry(vma, &vm->active_list, mm_list) {
-                       obj = vma->obj;
-                       if (obj->ring != ring)
-                               continue;
-
-                       if (i915_seqno_passed(seqno, obj->last_read_seqno))
-                               continue;
-
-                       if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
-                               continue;
-
-                       /* We need to copy these to an anonymous buffer as the simplest
-                        * method to avoid being overwritten by userspace.
-                        */
-                       return i915_error_object_create(dev_priv, obj);
-               }
-       }
-
-       return NULL;
-}
-
 static void i915_record_ring_state(struct drm_device *dev,
-                                  struct drm_i915_error_state *error,
-                                  struct intel_ring_buffer *ring)
+                                  struct intel_ring_buffer *ring,
+                                  struct drm_i915_error_ring *ering)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (INTEL_INFO(dev)->gen >= 6) {
-               error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
-               error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
-               error->semaphore_mboxes[ring->id][0]
+               ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
+               ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+               ering->semaphore_mboxes[0]
                        = I915_READ(RING_SYNC_0(ring->mmio_base));
-               error->semaphore_mboxes[ring->id][1]
+               ering->semaphore_mboxes[1]
                        = I915_READ(RING_SYNC_1(ring->mmio_base));
-               error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
-               error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+               ering->semaphore_seqno[0] = ring->sync_seqno[0];
+               ering->semaphore_seqno[1] = ring->sync_seqno[1];
        }
 
        if (HAS_VEBOX(dev)) {
-               error->semaphore_mboxes[ring->id][2] =
+               ering->semaphore_mboxes[2] =
                        I915_READ(RING_SYNC_2(ring->mmio_base));
-               error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
+               ering->semaphore_seqno[2] = ring->sync_seqno[2];
        }
 
        if (INTEL_INFO(dev)->gen >= 4) {
-               error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
-               error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
-               error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
-               error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
-               error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-               error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
+               ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
+               ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
+               ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+               ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
+               ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
+               ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
                if (INTEL_INFO(dev)->gen >= 8)
-                       error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
-               error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
+                       ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+               ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
        } else {
-               error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
-               error->ipeir[ring->id] = I915_READ(IPEIR);
-               error->ipehr[ring->id] = I915_READ(IPEHR);
-               error->instdone[ring->id] = I915_READ(INSTDONE);
+               ering->faddr = I915_READ(DMA_FADD_I8XX);
+               ering->ipeir = I915_READ(IPEIR);
+               ering->ipehr = I915_READ(IPEHR);
+               ering->instdone = I915_READ(INSTDONE);
+       }
+
+       ering->waiting = waitqueue_active(&ring->irq_queue);
+       ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
+       ering->seqno = ring->get_seqno(ring, false);
+       ering->acthd = intel_ring_get_active_head(ring);
+       ering->head = I915_READ_HEAD(ring);
+       ering->tail = I915_READ_TAIL(ring);
+       ering->ctl = I915_READ_CTL(ring);
+
+       if (I915_NEED_GFX_HWS(dev)) {
+               int mmio;
+
+               if (IS_GEN7(dev)) {
+                       switch (ring->id) {
+                       default:
+                       case RCS:
+                               mmio = RENDER_HWS_PGA_GEN7;
+                               break;
+                       case BCS:
+                               mmio = BLT_HWS_PGA_GEN7;
+                               break;
+                       case VCS:
+                               mmio = BSD_HWS_PGA_GEN7;
+                               break;
+                       case VECS:
+                               mmio = VEBOX_HWS_PGA_GEN7;
+                               break;
+                       }
+               } else if (IS_GEN6(ring->dev)) {
+                       mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+               } else {
+                       /* XXX: gen8 returns to the standard RING_HWS_PGA layout */
+                       mmio = RING_HWS_PGA(ring->mmio_base);
+               }
+
+               ering->hws = I915_READ(mmio);
        }
 
-       error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
-       error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
-       error->seqno[ring->id] = ring->get_seqno(ring, false);
-       error->acthd[ring->id] = intel_ring_get_active_head(ring);
-       error->head[ring->id] = I915_READ_HEAD(ring);
-       error->tail[ring->id] = I915_READ_TAIL(ring);
-       error->ctl[ring->id] = I915_READ_CTL(ring);
+       ering->cpu_ring_head = ring->head;
+       ering->cpu_ring_tail = ring->tail;
+
+       ering->hangcheck_score = ring->hangcheck.score;
+       ering->hangcheck_action = ring->hangcheck.action;
+
+       if (USES_PPGTT(dev)) {
+               int i;
 
-       error->cpu_ring_head[ring->id] = ring->head;
-       error->cpu_ring_tail[ring->id] = ring->tail;
+               ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
 
-       error->hangcheck_score[ring->id] = ring->hangcheck.score;
-       error->hangcheck_action[ring->id] = ring->hangcheck.action;
+               switch (INTEL_INFO(dev)->gen) {
+               case 8:
+                       for (i = 0; i < 4; i++) {
+                               ering->vm_info.pdp[i] =
+                                       I915_READ(GEN8_RING_PDP_UDW(ring, i));
+                               ering->vm_info.pdp[i] <<= 32;
+                               ering->vm_info.pdp[i] |=
+                                       I915_READ(GEN8_RING_PDP_LDW(ring, i));
+                       }
+                       break;
+               case 7:
+                       ering->vm_info.pp_dir_base = RING_PP_DIR_BASE(ring);
+                       break;
+               case 6:
+                       ering->vm_info.pp_dir_base = RING_PP_DIR_BASE_READ(ring);
+                       break;
+               }
+       }
 }
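
For gen8, the three statements per PDP entry above are equivalent to
assembling the 64-bit pointer in one expression (a sketch over the same
registers):

	ering->vm_info.pdp[i] = (u64)I915_READ(GEN8_RING_PDP_UDW(ring, i)) << 32 |
				I915_READ(GEN8_RING_PDP_LDW(ring, i));
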
 
 
@@ -770,7 +874,9 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
                        ering->ctx = i915_error_object_create_sized(dev_priv,
-                                                                   obj, 1);
+                                                                   obj,
+                                                                   &dev_priv->gtt.base,
+                                                                   1);
                        break;
                }
        }
@@ -791,14 +897,48 @@ static void i915_gem_record_rings(struct drm_device *dev,
 
                error->ring[i].valid = true;
 
-               i915_record_ring_state(dev, error, ring);
+               i915_record_ring_state(dev, ring, &error->ring[i]);
 
-               error->ring[i].batchbuffer =
-                       i915_error_first_batchbuffer(dev_priv, ring);
+               error->ring[i].pid = -1;
+               request = i915_gem_find_active_request(ring);
+               if (request) {
+                       /* We need to copy these to an anonymous buffer
+                        * as the simplest method to avoid being overwritten
+                        * by userspace.
+                        */
+                       error->ring[i].batchbuffer =
+                               i915_error_object_create(dev_priv,
+                                                        request->batch_obj,
+                                                        request->ctx ?
+                                                        request->ctx->vm :
+                                                        &dev_priv->gtt.base);
+
+                       if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
+                           ring->scratch.obj)
+                               error->ring[i].wa_batchbuffer =
+                                       i915_error_ggtt_object_create(dev_priv,
+                                                            ring->scratch.obj);
+
+                       if (request->file_priv) {
+                               struct task_struct *task;
+
+                               rcu_read_lock();
+                               task = pid_task(request->file_priv->file->pid,
+                                               PIDTYPE_PID);
+                               if (task) {
+                                       strcpy(error->ring[i].comm, task->comm);
+                                       error->ring[i].pid = task->pid;
+                               }
+                               rcu_read_unlock();
+                       }
+               }
 
                error->ring[i].ringbuffer =
-                       i915_error_object_create(dev_priv, ring->obj);
+                       i915_error_ggtt_object_create(dev_priv, ring->obj);
 
+               if (ring->status_page.obj)
+                       error->ring[i].hws_page =
+                               i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
 
                i915_gem_record_active_context(ring, error, &error->ring[i]);
 
@@ -845,7 +985,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
                i++;
        error->active_bo_count[ndx] = i;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-               if (obj->pin_count)
+               if (i915_gem_obj_is_pinned(obj))
                        i++;
        error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
 
@@ -879,11 +1019,6 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
        list_for_each_entry(vm, &dev_priv->vm_list, global_link)
                cnt++;
 
-       if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
-               cnt = 1;
-
-       vm = &dev_priv->gtt.base;
-
        error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
        error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
        error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
@@ -895,6 +1030,108 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
                i915_gem_capture_vm(dev_priv, error, vm, i++);
 }
 
+/* Capture all registers which don't fit into another category. */
+static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
+                                  struct drm_i915_error_state *error)
+{
+       struct drm_device *dev = dev_priv->dev;
+       int pipe;
+
+       /* General organization:
+        * 1. Registers specific to a single generation
+        * 2. Registers which belong to multiple generations
+        * 3. Feature specific registers
+        * 4. Everything else
+        * Please try to follow the order.
+        */
+
+       /* 1: Registers specific to a single generation */
+       if (IS_VALLEYVIEW(dev)) {
+               error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+               error->forcewake = I915_READ(FORCEWAKE_VLV);
+       }
+
+       if (IS_GEN7(dev))
+               error->err_int = I915_READ(GEN7_ERR_INT);
+
+       if (IS_GEN6(dev)) {
+               error->forcewake = I915_READ(FORCEWAKE);
+               error->gab_ctl = I915_READ(GAB_CTL);
+               error->gfx_mode = I915_READ(GFX_MODE);
+       }
+
+       if (IS_GEN2(dev))
+               error->ier = I915_READ16(IER);
+
+       /* 2: Registers which belong to multiple generations */
+       if (INTEL_INFO(dev)->gen >= 7)
+               error->forcewake = I915_READ(FORCEWAKE_MT);
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               error->derrmr = I915_READ(DERRMR);
+               error->error = I915_READ(ERROR_GEN6);
+               error->done_reg = I915_READ(DONE_REG);
+       }
+
+       /* 3: Feature specific registers */
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               error->gam_ecochk = I915_READ(GAM_ECOCHK);
+               error->gac_eco = I915_READ(GAC_ECO_BITS);
+       }
+
+       /* 4: Everything else */
+       if (HAS_HW_CONTEXTS(dev))
+               error->ccid = I915_READ(CCID);
+
+       if (HAS_PCH_SPLIT(dev))
+               error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+       else {
+               error->ier = I915_READ(IER);
+               for_each_pipe(pipe)
+                       error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+       }
+
+       error->eir = I915_READ(EIR);
+       error->pgtbl_er = I915_READ(PGTBL_ER);
+
+       i915_get_extra_instdone(dev, error->extra_instdone);
+}
+
+static void i915_error_capture_msg(struct drm_device *dev,
+                                  struct drm_i915_error_state *error,
+                                  bool wedged,
+                                  const char *error_msg)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 ecode;
+       int ring_id = -1, len;
+
+       ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+
+       len = scnprintf(error->error_msg, sizeof(error->error_msg),
+                       "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
+
+       if (ring_id != -1 && error->ring[ring_id].pid != -1)
+               len += scnprintf(error->error_msg + len,
+                                sizeof(error->error_msg) - len,
+                                ", in %s [%d]",
+                                error->ring[ring_id].comm,
+                                error->ring[ring_id].pid);
+
+       scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
+                 ", reason: %s, action: %s",
+                 error_msg,
+                 wedged ? "reset" : "continue");
+}
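
Assembled from the three scnprintf calls above, the captured message takes
this shape (field values illustrative; the ", in ..." clause only appears
when a hung ring with a known submitter was found):

	GPU HANG: ecode 0:0xf4e9fffe, in Xorg [1234], reason: GT error interrupt 0x00000010, action: continue
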
+
+static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
+                                  struct drm_i915_error_state *error)
+{
+       error->reset_count = i915_reset_count(&dev_priv->gpu_error);
+       error->suspend_count = dev_priv->suspend_count;
+}
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
@@ -904,18 +1141,13 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
  * out a structure which becomes available in debugfs for user level tools
  * to pick up.
  */
-void i915_capture_error_state(struct drm_device *dev)
+void i915_capture_error_state(struct drm_device *dev, bool wedged,
+                             const char *error_msg)
 {
+       static bool warned;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
-       int pipe;
-
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       error = dev_priv->gpu_error.first_error;
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-       if (error)
-               return;
 
        /* Account for pipe specific data like PIPE*STAT */
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
@@ -924,52 +1156,10 @@ void i915_capture_error_state(struct drm_device *dev)
                return;
        }
 
-       DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
-                dev->primary->index);
-       DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
-       DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
-       DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
-       DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
-
        kref_init(&error->ref);
-       error->eir = I915_READ(EIR);
-       error->pgtbl_er = I915_READ(PGTBL_ER);
-       if (HAS_HW_CONTEXTS(dev))
-               error->ccid = I915_READ(CCID);
-
-       if (HAS_PCH_SPLIT(dev))
-               error->ier = I915_READ(DEIER) | I915_READ(GTIER);
-       else if (IS_VALLEYVIEW(dev))
-               error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
-       else if (IS_GEN2(dev))
-               error->ier = I915_READ16(IER);
-       else
-               error->ier = I915_READ(IER);
-
-       if (INTEL_INFO(dev)->gen >= 6)
-               error->derrmr = I915_READ(DERRMR);
-
-       if (IS_VALLEYVIEW(dev))
-               error->forcewake = I915_READ(FORCEWAKE_VLV);
-       else if (INTEL_INFO(dev)->gen >= 7)
-               error->forcewake = I915_READ(FORCEWAKE_MT);
-       else if (INTEL_INFO(dev)->gen == 6)
-               error->forcewake = I915_READ(FORCEWAKE);
-
-       if (!HAS_PCH_SPLIT(dev))
-               for_each_pipe(pipe)
-                       error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-
-       if (INTEL_INFO(dev)->gen >= 6) {
-               error->error = I915_READ(ERROR_GEN6);
-               error->done_reg = I915_READ(DONE_REG);
-       }
-
-       if (INTEL_INFO(dev)->gen == 7)
-               error->err_int = I915_READ(GEN7_ERR_INT);
-
-       i915_get_extra_instdone(dev, error->extra_instdone);
 
+       i915_capture_gen_state(dev_priv, error);
+       i915_capture_reg_state(dev_priv, error);
        i915_gem_capture_buffers(dev_priv, error);
        i915_gem_record_fences(dev, error);
        i915_gem_record_rings(dev, error);
@@ -979,6 +1169,9 @@ void i915_capture_error_state(struct drm_device *dev)
        error->overlay = intel_overlay_capture_error_state(dev);
        error->display = intel_display_capture_error_state(dev);
 
+       i915_error_capture_msg(dev, error, wedged, error_msg);
+       DRM_INFO("%s\n", error->error_msg);
+
        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
        if (dev_priv->gpu_error.first_error == NULL) {
                dev_priv->gpu_error.first_error = error;
@@ -986,8 +1179,19 @@ void i915_capture_error_state(struct drm_device *dev)
        }
        spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
-       if (error)
+       if (error) {
                i915_error_state_free(&error->ref);
+               return;
+       }
+
+       if (!warned) {
+               DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+               DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+               DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+               DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
+               DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+               warned = true;
+       }
 }
 
 void i915_error_state_get(struct drm_device *dev,
index 9fec71175571e068cce957973431fe21eca962d2..be2713f12e082ba5b972cddced1ac06c4bb0cf7d 100644 (file)
@@ -232,6 +232,18 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
        return true;
 }
 
+static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 reg = PIPESTAT(pipe);
+       u32 pipestat = I915_READ(reg) & 0x7fff0000;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
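+       /* PIPESTAT keeps its interrupt enables in bits 30:16 and its
+        * write-1-to-clear status bits in bits 15:0 plus bit 31 (the
+        * underrun bit), so writing back the preserved enables with only
+        * PIPE_FIFO_UNDERRUN_STATUS set acks the underrun without
+        * clearing any other pending status.
+        */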
+       I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+       POSTING_READ(reg);
+}
+
 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
 {
@@ -375,16 +387,15 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
  *
  * Returns the previous state of underrun reporting.
  */
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-                                          enum pipe pipe, bool enable)
+bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+                                            enum pipe pipe, bool enable)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long flags;
        bool ret;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       assert_spin_locked(&dev_priv->irq_lock);
 
        ret = !intel_crtc->cpu_fifo_underrun_disabled;
 
@@ -393,7 +404,9 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 
        intel_crtc->cpu_fifo_underrun_disabled = !enable;
 
-       if (IS_GEN5(dev) || IS_GEN6(dev))
+       if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
+               i9xx_clear_fifo_underrun(dev, pipe);
+       else if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -401,10 +414,33 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
 done:
+       return ret;
+}
+
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+                                          enum pipe pipe, bool enable)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long flags;
+       bool ret;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
        return ret;
 }
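
Following the usual i915 convention, the double-underscore variant is for
callers that already hold irq_lock; a sketch:

	assert_spin_locked(&dev_priv->irq_lock);
	__intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
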
 
+static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
+                                                 enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       return !intel_crtc->cpu_fifo_underrun_disabled;
+}
+
 /**
  * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
  * @dev: drm device
@@ -458,39 +494,103 @@ done:
 }
 
 
-void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
+static void
+__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+                      u32 enable_mask, u32 status_mask)
 {
        u32 reg = PIPESTAT(pipe);
-       u32 pipestat = I915_READ(reg) & 0x7fff0000;
+       u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
        assert_spin_locked(&dev_priv->irq_lock);
 
-       if ((pipestat & mask) == mask)
+       if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+                        status_mask & ~PIPESTAT_INT_STATUS_MASK))
                return;
 
+       if ((pipestat & enable_mask) == enable_mask)
+               return;
+
+       dev_priv->pipestat_irq_mask[pipe] |= status_mask;
+
        /* Enable the interrupt, clear any pending status */
-       pipestat |= mask | (mask >> 16);
+       pipestat |= enable_mask | status_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
 }
 
-void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
+static void
+__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+                       u32 enable_mask, u32 status_mask)
 {
        u32 reg = PIPESTAT(pipe);
-       u32 pipestat = I915_READ(reg) & 0x7fff0000;
+       u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
        assert_spin_locked(&dev_priv->irq_lock);
 
-       if ((pipestat & mask) == 0)
+       if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+                        status_mask & ~PIPESTAT_INT_STATUS_MASK))
+               return;
+
+       if ((pipestat & enable_mask) == 0)
                return;
 
-       pipestat &= ~mask;
+       dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
+
+       pipestat &= ~enable_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
 }
 
+static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
+{
+       u32 enable_mask = status_mask << 16;
+
+       /*
+        * On pipe A we don't support the PSR interrupt yet; on pipe B the
+        * same bit is MBZ (must be zero).
+        */
+       if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
+               return 0;
+
+       enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
+                        SPRITE0_FLIP_DONE_INT_EN_VLV |
+                        SPRITE1_FLIP_DONE_INT_EN_VLV);
+       if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
+               enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
+       if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
+               enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
+
+       return enable_mask;
+}
+
+void
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+                    u32 status_mask)
+{
+       u32 enable_mask;
+
+       if (IS_VALLEYVIEW(dev_priv->dev))
+               enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+                                                          status_mask);
+       else
+               enable_mask = status_mask << 16;
+       __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+}
+
+void
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+                     u32 status_mask)
+{
+       u32 enable_mask;
+
+       if (IS_VALLEYVIEW(dev_priv->dev))
+               enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+                                                          status_mask);
+       else
+               enable_mask = status_mask << 16;
+       __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+}
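
Callers now pass only the status-half mask and the helpers derive the enable
bits (including the VLV sprite special cases); a minimal sketch of enabling
vblank interrupts on a pipe, assuming an irqflags local:

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
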
+
 /**
  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
  */
@@ -504,10 +604,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
-       i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
+       i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
-                                    PIPE_LEGACY_BLC_EVENT_ENABLE);
+                                    PIPE_LEGACY_BLC_EVENT_STATUS);
 
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
@@ -915,6 +1015,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
                drm_kms_helper_hotplug_event(dev);
 }
 
+static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
+{
+       del_timer_sync(&dev_priv->hotplug_reenable_timer);
+}
+
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -966,6 +1071,43 @@ static void notify_ring(struct drm_device *dev,
        i915_queue_hangcheck(dev);
 }
 
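+/* Avoid needless RPS interrupts once the frequency is pinned at a limit:
+ * after new_delay saturates at max_delay (min_delay), mask further UP
+ * (DOWN) THRESHOLD interrupts, and unmask the opposite direction again
+ * on the way back.
+ */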
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
+                     u32 pm_iir, int new_delay)
+{
+       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+               if (new_delay >= dev_priv->rps.max_delay) {
+                       /* Mask UP THRESHOLD Interrupts */
+                       I915_WRITE(GEN6_PMINTRMSK,
+                                  I915_READ(GEN6_PMINTRMSK) |
+                                  GEN6_PM_RP_UP_THRESHOLD);
+                       dev_priv->rps.rp_up_masked = true;
+               }
+               if (dev_priv->rps.rp_down_masked) {
+                       /* Unmask DOWN THRESHOLD Interrupts */
+                       I915_WRITE(GEN6_PMINTRMSK,
+                                  I915_READ(GEN6_PMINTRMSK) &
+                                  ~GEN6_PM_RP_DOWN_THRESHOLD);
+                       dev_priv->rps.rp_down_masked = false;
+               }
+       } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+               if (new_delay <= dev_priv->rps.min_delay) {
+                       /* Mask DOWN THRESHOLD Interrupts */
+                       I915_WRITE(GEN6_PMINTRMSK,
+                                  I915_READ(GEN6_PMINTRMSK) |
+                                  GEN6_PM_RP_DOWN_THRESHOLD);
+                       dev_priv->rps.rp_down_masked = true;
+               }
+
+               if (dev_priv->rps.rp_up_masked) {
+                       /* Unmask UP THRESHOLD Interrupts */
+                       I915_WRITE(GEN6_PMINTRMSK,
+                                  I915_READ(GEN6_PMINTRMSK) &
+                                  ~GEN6_PM_RP_UP_THRESHOLD);
+                       dev_priv->rps.rp_up_masked = false;
+               }
+       }
+}
+
 static void gen6_pm_rps_work(struct work_struct *work)
 {
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
@@ -1023,6 +1165,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
         */
        new_delay = clamp_t(int, new_delay,
                            dev_priv->rps.min_delay, dev_priv->rps.max_delay);
+
+       gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
        dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
 
        if (IS_VALLEYVIEW(dev_priv->dev))
@@ -1164,8 +1308,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
-               DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
-               i915_handle_error(dev, false);
+               i915_handle_error(dev, false, "GT error interrupt 0x%08x",
+                                 gt_iir);
        }
 
        if (gt_iir & GT_PARITY_ERROR(dev))
@@ -1236,6 +1380,9 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
        if (!hotplug_trigger)
                return;
 
+       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+                         hotplug_trigger);
+
        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
 
@@ -1409,23 +1556,89 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
                        notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
-                       DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
-                       i915_handle_error(dev_priv->dev, false);
+                       i915_handle_error(dev_priv->dev, false,
+                                         "VEBOX CS error interrupt 0x%08x",
+                                         pm_iir);
                }
        }
 }
 
+static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pipe_stats[I915_MAX_PIPES] = { };
+       int pipe;
+
+       spin_lock(&dev_priv->irq_lock);
+       for_each_pipe(pipe) {
+               int reg;
+               u32 mask, iir_bit = 0;
+
+               /*
+                * PIPESTAT bits get signalled even when the interrupt is
+                * disabled with the mask bits, and some of the status bits do
+                * not generate interrupts at all (like the underrun bit). Hence
+                * we need to be careful that we only handle what we want to
+                * handle.
+                */
+               mask = 0;
+               if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
+                       mask |= PIPE_FIFO_UNDERRUN_STATUS;
+
+               switch (pipe) {
+               case PIPE_A:
+                       iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+                       break;
+               case PIPE_B:
+                       iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+                       break;
+               }
+               if (iir & iir_bit)
+                       mask |= dev_priv->pipestat_irq_mask[pipe];
+
+               if (!mask)
+                       continue;
+
+               reg = PIPESTAT(pipe);
+               mask |= PIPESTAT_INT_ENABLE_MASK;
+               pipe_stats[pipe] = I915_READ(reg) & mask;
+
+               /*
+                * Clear the PIPE*STAT regs before the IIR
+                */
+               if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
+                                       PIPESTAT_INT_STATUS_MASK))
+                       I915_WRITE(reg, pipe_stats[pipe]);
+       }
+       spin_unlock(&dev_priv->irq_lock);
+
+       for_each_pipe(pipe) {
+               if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
+                       drm_handle_vblank(dev, pipe);
+
+               if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
+                       intel_prepare_page_flip(dev, pipe);
+                       intel_finish_page_flip(dev, pipe);
+               }
+
+               if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+                       i9xx_pipe_crc_irq_handler(dev, pipe);
+
+               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+                   intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+                       DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+       }
+
+       if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+               gmbus_irq_handler(dev);
+}
+
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
-       unsigned long irqflags;
-       int pipe;
-       u32 pipe_stats[I915_MAX_PIPES];
-
-       atomic_inc(&dev_priv->irq_received);
 
        while (true) {
                iir = I915_READ(VLV_IIR);
@@ -1439,44 +1652,13 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
 
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-               for_each_pipe(pipe) {
-                       int reg = PIPESTAT(pipe);
-                       pipe_stats[pipe] = I915_READ(reg);
-
-                       /*
-                        * Clear the PIPE*STAT regs before the IIR
-                        */
-                       if (pipe_stats[pipe] & 0x8000ffff) {
-                               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-                                       DRM_DEBUG_DRIVER("pipe %c underrun\n",
-                                                        pipe_name(pipe));
-                               I915_WRITE(reg, pipe_stats[pipe]);
-                       }
-               }
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
-               for_each_pipe(pipe) {
-                       if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
-                               drm_handle_vblank(dev, pipe);
-
-                       if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
-                               intel_prepare_page_flip(dev, pipe);
-                               intel_finish_page_flip(dev, pipe);
-                       }
-
-                       if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-                               i9xx_pipe_crc_irq_handler(dev, pipe);
-               }
+               valleyview_pipestat_irq_handler(dev, iir);
 
                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
-                       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-                                        hotplug_status);
-
                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
 
                        if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
@@ -1486,8 +1668,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                        I915_READ(PORT_HOTPLUG_STAT);
                }
 
-               if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
-                       gmbus_irq_handler(dev);
 
                if (pm_iir)
                        gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -1546,12 +1726,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
-                       DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+                       DRM_ERROR("PCH transcoder A FIFO underrun\n");
 
        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
-                       DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+                       DRM_ERROR("PCH transcoder B FIFO underrun\n");
 }
 
 static void ivb_err_int_handler(struct drm_device *dev)
@@ -1567,8 +1747,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
                if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
                                                                  false))
-                               DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
-                                                pipe_name(pipe));
+                               DRM_ERROR("Pipe %c FIFO underrun\n",
+                                         pipe_name(pipe));
                }
 
                if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
@@ -1593,17 +1773,17 @@ static void cpt_serr_int_handler(struct drm_device *dev)
        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
-                       DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+                       DRM_ERROR("PCH transcoder A FIFO underrun\n");
 
        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
-                       DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+                       DRM_ERROR("PCH transcoder B FIFO underrun\n");
 
        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
                                                          false))
-                       DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
+                       DRM_ERROR("PCH transcoder C FIFO underrun\n");
 
        I915_WRITE(SERR_INT, serr_int);
 }
@@ -1665,8 +1845,8 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 
                if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
-                               DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
-                                                pipe_name(pipe));
+                               DRM_ERROR("Pipe %c FIFO underrun\n",
+                                         pipe_name(pipe));
 
                if (de_iir & DE_PIPE_CRC_DONE(pipe))
                        i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -1698,7 +1878,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       enum pipe i;
+       enum pipe pipe;
 
        if (de_iir & DE_ERR_INT_IVB)
                ivb_err_int_handler(dev);
@@ -1709,14 +1889,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
        if (de_iir & DE_GSE_IVB)
                intel_opregion_asle_intr(dev);
 
-       for_each_pipe(i) {
-               if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
-                       drm_handle_vblank(dev, i);
+       for_each_pipe(pipe) {
+               if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
+                       drm_handle_vblank(dev, pipe);
 
                /* plane/pipes map 1:1 on ilk+ */
-               if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
-                       intel_prepare_page_flip(dev, i);
-                       intel_finish_page_flip_plane(dev, i);
+               if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
+                       intel_prepare_page_flip(dev, pipe);
+                       intel_finish_page_flip_plane(dev, pipe);
                }
        }
 
@@ -1738,8 +1918,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
 
-       atomic_inc(&dev_priv->irq_received);
-
        /* We get interrupts on unclaimed registers, so check for this before we
         * do any I915_{READ,WRITE}. */
        intel_uncore_check_errors(dev);
@@ -1808,8 +1986,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        uint32_t tmp = 0;
        enum pipe pipe;
 
-       atomic_inc(&dev_priv->irq_received);
-
        master_ctl = I915_READ(GEN8_MASTER_IRQ);
        master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
        if (!master_ctl)
@@ -1871,8 +2047,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
                                                                  false))
-                               DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
-                                                pipe_name(pipe));
+                               DRM_ERROR("Pipe %c FIFO underrun\n",
+                                         pipe_name(pipe));
                }
 
                if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
@@ -2114,11 +2290,18 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
  */
-void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_handle_error(struct drm_device *dev, bool wedged,
+                      const char *fmt, ...)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       va_list args;
+       char error_msg[80];
 
-       i915_capture_error_state(dev);
+       va_start(args, fmt);
+       vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+       va_end(args);
+
+       i915_capture_error_state(dev, wedged, error_msg);
        i915_report_and_clear_eir(dev);
 
        if (wedged) {
@@ -2210,13 +2393,13 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, pipe,
-                                    PIPE_START_VBLANK_INTERRUPT_ENABLE);
+                                    PIPE_START_VBLANK_INTERRUPT_STATUS);
        else
                i915_enable_pipestat(dev_priv, pipe,
-                                    PIPE_VBLANK_INTERRUPT_ENABLE);
+                                    PIPE_VBLANK_INTERRUPT_STATUS);
 
        /* maintain vblank delivery even in deep C-states */
-       if (dev_priv->info->gen == 3)
+       if (INTEL_INFO(dev)->gen == 3)
                I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
@@ -2244,20 +2427,13 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
-       u32 imr;
 
        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       imr = I915_READ(VLV_IMR);
-       if (pipe == PIPE_A)
-               imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-       else
-               imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-       I915_WRITE(VLV_IMR, imr);
        i915_enable_pipestat(dev_priv, pipe,
-                            PIPE_START_VBLANK_INTERRUPT_ENABLE);
+                            PIPE_START_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        return 0;
@@ -2288,12 +2464,12 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       if (dev_priv->info->gen == 3)
+       if (INTEL_INFO(dev)->gen == 3)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
 
        i915_disable_pipestat(dev_priv, pipe,
-                             PIPE_VBLANK_INTERRUPT_ENABLE |
-                             PIPE_START_VBLANK_INTERRUPT_ENABLE);
+                             PIPE_VBLANK_INTERRUPT_STATUS |
+                             PIPE_START_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
@@ -2313,17 +2489,10 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
-       u32 imr;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, pipe,
-                             PIPE_START_VBLANK_INTERRUPT_ENABLE);
-       imr = I915_READ(VLV_IMR);
-       if (pipe == PIPE_A)
-               imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-       else
-               imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-       I915_WRITE(VLV_IMR, imr);
+                             PIPE_START_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
@@ -2435,9 +2604,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
         */
        tmp = I915_READ_CTL(ring);
        if (tmp & RING_WAIT) {
-               DRM_ERROR("Kicking stuck wait on %s\n",
-                         ring->name);
-               i915_handle_error(dev, false);
+               i915_handle_error(dev, false,
+                                 "Kicking stuck wait on %s",
+                                 ring->name);
                I915_WRITE_CTL(ring, tmp);
                return HANGCHECK_KICK;
        }
@@ -2447,9 +2616,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
                default:
                        return HANGCHECK_HUNG;
                case 1:
-                       DRM_ERROR("Kicking stuck semaphore on %s\n",
-                                 ring->name);
-                       i915_handle_error(dev, false);
+                       i915_handle_error(dev, false,
+                                         "Kicking stuck semaphore on %s",
+                                         ring->name);
                        I915_WRITE_CTL(ring, tmp);
                        return HANGCHECK_KICK;
                case 0:
@@ -2479,9 +2648,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
 #define BUSY 1
 #define KICK 5
 #define HUNG 20
-#define FIRE 30
 
-       if (!i915_enable_hangcheck)
+       if (!i915.enable_hangcheck)
                return;
 
        for_each_ring(ring, dev_priv, i) {
@@ -2563,7 +2731,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
        }
 
        for_each_ring(ring, dev_priv, i) {
-               if (ring->hangcheck.score > FIRE) {
+               if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
                        DRM_INFO("%s on %s\n",
                                 stuck[i] ? "stuck" : "no progress",
                                 ring->name);
@@ -2572,7 +2740,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
        }
 
        if (rings_hung)
-               return i915_handle_error(dev, true);
+               return i915_handle_error(dev, true, "Ring hung");
 
        if (busy_count)
                /* Reset timer case chip hangs without another request
@@ -2583,7 +2751,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
 void i915_queue_hangcheck(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       if (!i915_enable_hangcheck)
+       if (!i915.enable_hangcheck)
                return;
 
        mod_timer(&dev_priv->gpu_error.hangcheck_timer,
@@ -2632,8 +2800,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-       atomic_set(&dev_priv->irq_received, 0);
-
        I915_WRITE(HWSTAM, 0xeffe);
 
        I915_WRITE(DEIMR, 0xffffffff);
@@ -2650,8 +2816,6 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
 
-       atomic_set(&dev_priv->irq_received, 0);
-
        /* VLV magic */
        I915_WRITE(VLV_IMR, 0);
        I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
@@ -2681,8 +2845,6 @@ static void gen8_irq_preinstall(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
 
-       atomic_set(&dev_priv->irq_received, 0);
-
        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);
 
@@ -2874,44 +3036,113 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
+static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
+{
+       u32 pipestat_mask;
+       u32 iir_mask;
+
+       pipestat_mask = PIPESTAT_INT_STATUS_MASK |
+                       PIPE_FIFO_UNDERRUN_STATUS;
+
+       I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
+       I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+       POSTING_READ(PIPESTAT(PIPE_A));
+
+       pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+                       PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+       i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
+                                              PIPE_GMBUS_INTERRUPT_STATUS);
+       i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+
+       iir_mask = I915_DISPLAY_PORT_INTERRUPT |
+                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+       dev_priv->irq_mask &= ~iir_mask;
+
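+       /* Annotation: VLV_IIR is written twice below on purpose; status bits
+        * can re-assert while being cleared, so a single write may not stick.
+        * The postinstall path clears IIR twice for the same reason. */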
+       I915_WRITE(VLV_IIR, iir_mask);
+       I915_WRITE(VLV_IIR, iir_mask);
+       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+       I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+       POSTING_READ(VLV_IER);
+}
+
+static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
+{
+       u32 pipestat_mask;
+       u32 iir_mask;
+
+       iir_mask = I915_DISPLAY_PORT_INTERRUPT |
+                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+
+       dev_priv->irq_mask |= iir_mask;
+       I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+       I915_WRITE(VLV_IIR, iir_mask);
+       I915_WRITE(VLV_IIR, iir_mask);
+       POSTING_READ(VLV_IIR);
+
+       pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+                       PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+       i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
+                                               PIPE_GMBUS_INTERRUPT_STATUS);
+       i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+
+       pipestat_mask = PIPESTAT_INT_STATUS_MASK |
+                       PIPE_FIFO_UNDERRUN_STATUS;
+       I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
+       I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+       POSTING_READ(PIPESTAT(PIPE_A));
+}
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+{
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       if (dev_priv->display_irqs_enabled)
+               return;
+
+       dev_priv->display_irqs_enabled = true;
+
+       if (dev_priv->dev->irq_enabled)
+               valleyview_display_irqs_install(dev_priv);
+}
+
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+{
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       if (!dev_priv->display_irqs_enabled)
+               return;
+
+       dev_priv->display_irqs_enabled = false;
+
+       if (dev_priv->dev->irq_enabled)
+               valleyview_display_irqs_uninstall(dev_priv);
+}
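
Usage sketch (an annotation, not part of the patch): both helpers call
assert_spin_locked(), so a runtime caller is expected to hold irq_lock itself:

	/* Sketch: toggle VLV display interrupts at runtime. irq_lock protects
	 * irq_mask and the pipestat bookkeeping the helpers touch. */
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);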
+
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 enable_mask;
-       u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
-               PIPE_CRC_DONE_ENABLE;
        unsigned long irqflags;
 
-       enable_mask = I915_DISPLAY_PORT_INTERRUPT;
-       enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-               I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
-               I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-       /*
-        *Leave vblank interrupts masked initially.  enable/disable will
-        * toggle them based on usage.
-        */
-       dev_priv->irq_mask = (~enable_mask) |
-               I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-               I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+       dev_priv->irq_mask = ~0;
 
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        POSTING_READ(PORT_HOTPLUG_EN);
 
        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       I915_WRITE(VLV_IER, enable_mask);
+       I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
        I915_WRITE(VLV_IIR, 0xffffffff);
-       I915_WRITE(PIPESTAT(0), 0xffff);
-       I915_WRITE(PIPESTAT(1), 0xffff);
        POSTING_READ(VLV_IER);
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
-       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
-       i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
+       if (dev_priv->display_irqs_enabled)
+               valleyview_display_irqs_install(dev_priv);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        I915_WRITE(VLV_IIR, 0xffffffff);
@@ -3007,8 +3238,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
        if (!dev_priv)
                return;
 
-       atomic_set(&dev_priv->irq_received, 0);
-
        I915_WRITE(GEN8_MASTER_IRQ, 0);
 
 #define GEN8_IRQ_FINI_NDX(type, which) do { \
@@ -3044,12 +3273,13 @@ static void gen8_irq_uninstall(struct drm_device *dev)
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
        int pipe;
 
        if (!dev_priv)
                return;
 
-       del_timer_sync(&dev_priv->hotplug_reenable_timer);
+       intel_hpd_irq_uninstall(dev_priv);
 
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
@@ -3057,8 +3287,14 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
        I915_WRITE(HWSTAM, 0xffffffff);
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-       for_each_pipe(pipe)
-               I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       if (dev_priv->display_irqs_enabled)
+               valleyview_display_irqs_uninstall(dev_priv);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+       dev_priv->irq_mask = 0;
+
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
@@ -3072,7 +3308,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
        if (!dev_priv)
                return;
 
-       del_timer_sync(&dev_priv->hotplug_reenable_timer);
+       intel_hpd_irq_uninstall(dev_priv);
 
        I915_WRITE(HWSTAM, 0xffffffff);
 
@@ -3101,8 +3337,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
 
-       atomic_set(&dev_priv->irq_received, 0);
-
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE16(IMR, 0xffff);
@@ -3137,8 +3371,8 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
-       i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+       i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        return 0;
@@ -3187,8 +3421,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
-       atomic_inc(&dev_priv->irq_received);
-
        iir = I915_READ16(IIR);
        if (iir == 0)
                return IRQ_NONE;
@@ -3201,7 +3433,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-                       i915_handle_error(dev, false);
+                       i915_handle_error(dev, false,
+                                         "Command parser error, iir 0x%08x",
+                                         iir);
 
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
@@ -3210,12 +3444,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
-                       if (pipe_stats[pipe] & 0x8000ffff) {
-                               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-                                       DRM_DEBUG_DRIVER("pipe %c underrun\n",
-                                                        pipe_name(pipe));
+                       if (pipe_stats[pipe] & 0x8000ffff)
                                I915_WRITE(reg, pipe_stats[pipe]);
-                       }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
@@ -3238,6 +3468,10 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
+
+                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+                           intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+                               DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
                }
 
                iir = new_iir;
@@ -3266,8 +3500,6 @@ static void i915_irq_preinstall(struct drm_device * dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
 
-       atomic_set(&dev_priv->irq_received, 0);
-
        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3324,8 +3556,8 @@ static int i915_irq_postinstall(struct drm_device *dev)
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
-       i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+       i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        return 0;
@@ -3373,8 +3605,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
        int pipe, ret = IRQ_NONE;
 
-       atomic_inc(&dev_priv->irq_received);
-
        iir = I915_READ(IIR);
        do {
                bool irq_received = (iir & ~flip_mask) != 0;
@@ -3387,7 +3617,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-                       i915_handle_error(dev, false);
+                       i915_handle_error(dev, false,
+                                         "Command parser error, iir 0x%08x",
+                                         iir);
 
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
@@ -3395,9 +3627,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
                        /* Clear the PIPE*STAT regs before the IIR */
                        if (pipe_stats[pipe] & 0x8000ffff) {
-                               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-                                       DRM_DEBUG_DRIVER("pipe %c underrun\n",
-                                                        pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = true;
                        }
@@ -3413,9 +3642,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
-                       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-                                 hotplug_status);
-
                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
 
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
@@ -3442,6 +3668,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
+
+                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+                           intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+                               DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
                }
 
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -3476,7 +3706,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
 
-       del_timer_sync(&dev_priv->hotplug_reenable_timer);
+       intel_hpd_irq_uninstall(dev_priv);
 
        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -3500,8 +3730,6 @@ static void i965_irq_preinstall(struct drm_device * dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
 
-       atomic_set(&dev_priv->irq_received, 0);
-
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
@@ -3540,9 +3768,9 @@ static int i965_irq_postinstall(struct drm_device *dev)
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
-       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
-       i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+       i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        /*
@@ -3610,21 +3838,17 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
        unsigned long irqflags;
-       int irq_received;
        int ret = IRQ_NONE, pipe;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
-       atomic_inc(&dev_priv->irq_received);
-
        iir = I915_READ(IIR);
 
        for (;;) {
+               bool irq_received = (iir & ~flip_mask) != 0;
                bool blc_event = false;
 
-               irq_received = (iir & ~flip_mask) != 0;
-
                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
@@ -3632,7 +3856,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-                       i915_handle_error(dev, false);
+                       i915_handle_error(dev, false,
+                                         "Command parser error, iir 0x%08x",
+                                         iir);
 
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
@@ -3642,11 +3868,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
-                               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-                                       DRM_DEBUG_DRIVER("pipe %c underrun\n",
-                                                        pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
-                               irq_received = 1;
+                               irq_received = true;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3663,9 +3886,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                                                                  HOTPLUG_INT_STATUS_G4X :
                                                                  HOTPLUG_INT_STATUS_I915);
 
-                       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-                                 hotplug_status);
-
                        intel_hpd_irq_handler(dev, hotplug_trigger,
                                              IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
 
@@ -3695,8 +3915,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
-               }
 
+                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+                           intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+                               DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+               }
 
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev);
@@ -3735,7 +3958,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
        if (!dev_priv)
                return;
 
-       del_timer_sync(&dev_priv->hotplug_reenable_timer);
+       intel_hpd_irq_uninstall(dev_priv);
 
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3752,7 +3975,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
        I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void i915_reenable_hotplug_timer_func(unsigned long data)
+static void intel_hpd_irq_reenable(unsigned long data)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
        struct drm_device *dev = dev_priv->dev;
@@ -3799,7 +4022,7 @@ void intel_irq_init(struct drm_device *dev)
        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);
-       setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
+       setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
                    (unsigned long) dev_priv);
 
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
new file mode 100644 (file)
index 0000000..a66ffb6
--- /dev/null
@@ -0,0 +1,164 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+
+struct i915_params i915 __read_mostly = {
+       .modeset = -1,
+       .panel_ignore_lid = 1,
+       .powersave = 1,
+       .semaphores = -1,
+       .lvds_downclock = 0,
+       .lvds_channel_mode = 0,
+       .panel_use_ssc = -1,
+       .vbt_sdvo_panel_type = -1,
+       .enable_rc6 = -1,
+       .enable_fbc = -1,
+       .enable_hangcheck = true,
+       .enable_ppgtt = -1,
+       .enable_psr = 0,
+       .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
+       .disable_power_well = 1,
+       .enable_ips = 1,
+       .fastboot = 0,
+       .enable_pc8 = 1,
+       .pc8_timeout = 5000,
+       .prefault_disable = 0,
+       .reset = true,
+       .invert_brightness = 0,
+       .disable_display = 0,
+       .enable_cmd_parser = 0,
+};
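
Illustration (an annotation; the i915_params struct itself is declared in
i915_drv.h): any i915 file can now read an option through the one shared
instance, e.g.

	if (!i915.enable_hangcheck)
		return;	/* hangcheck disabled via the module option */

rather than through a per-file module_param global.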
+
+module_param_named(modeset, i915.modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+       "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+       "1=on, -1=force vga console preference [default])");
+
+module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+       "Override lid status (0=autodetect, 1=autodetect disabled [default], "
+       "-1=force lid closed, -2=force lid open)");
+
+module_param_named(powersave, i915.powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+       "Enable powersavings, fbc, downclocking, etc. (default: true)");
+
+module_param_named(semaphores, i915.semaphores, int, 0400);
+MODULE_PARM_DESC(semaphores,
+       "Use semaphores for inter-ring sync "
+       "(default: -1 (use per-chip defaults))");
+
+module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
+MODULE_PARM_DESC(enable_rc6,
+       "Enable power-saving render C-state 6. "
+       "Different stages can be selected via bitmask values "
+       "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+       "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+       "default: -1 (use per-chip default)");
+
+module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
+MODULE_PARM_DESC(enable_fbc,
+       "Enable frame buffer compression for power savings "
+       "(default: -1 (use per-chip default))");
+
+module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+       "Use panel (LVDS/eDP) downclocking for power savings "
+       "(default: false)");
+
+module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+        "Specify LVDS channel mode "
+        "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+       "Use Spread Spectrum Clock with panels [LVDS/eDP] "
+       "(default: auto from VBT)");
+
+module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+       "Override/Ignore selection of SDVO panel mode in the VBT "
+       "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+module_param_named(reset, i915.reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+       "Periodically check GPU activity for detecting hangs. "
+       "WARNING: Disabling this can cause system wide hangs. "
+       "(default: true)");
+
+module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
+MODULE_PARM_DESC(enable_ppgtt,
+       "Override PPGTT usage. "
+       "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
+
+module_param_named(enable_psr, i915.enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
+module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
+MODULE_PARM_DESC(preliminary_hw_support,
+       "Enable preliminary hardware support.");
+
+module_param_named(disable_power_well, i915.disable_power_well, int, 0600);
+MODULE_PARM_DESC(disable_power_well,
+       "Disable the power well when possible (default: true)");
+
+module_param_named(enable_ips, i915.enable_ips, int, 0600);
+MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+
+module_param_named(fastboot, i915.fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot,
+       "Try to skip unnecessary mode sets at boot time (default: false)");
+
+module_param_named(enable_pc8, i915.enable_pc8, int, 0600);
+MODULE_PARM_DESC(enable_pc8,
+       "Enable support for low power package C states (PC8+) (default: true)");
+
+module_param_named(pc8_timeout, i915.pc8_timeout, int, 0600);
+MODULE_PARM_DESC(pc8_timeout,
+       "Number of msecs of idleness required to enter PC8+ (default: 5000)");
+
+module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+       "Disable page prefaulting for pread/pwrite/reloc (default: false). "
+       "For developers only.");
+
+module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
+MODULE_PARM_DESC(invert_brightness,
+       "Invert backlight brightness "
+       "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+       "report PCI device ID, subsystem vendor and subsystem device ID "
+       "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+       "It will then be included in an upcoming module version.");
+
+module_param_named(disable_display, i915.disable_display, bool, 0600);
+MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
+
+module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
+MODULE_PARM_DESC(enable_cmd_parser,
+                "Enable command parsing (1=enabled, 0=disabled [default])");
index a48b7cad6f1135c29742f39cacf23e359f92faf4..146609ab42bb982c1510a28f7e1a432ff53474e2 100644 (file)
@@ -26,7 +26,6 @@
 #define _I915_REG_H_
 
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
-#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
 #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
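
A quick worked example of the interpolation these helpers rely on:

	/* _PIPE(0, a, b) == a + 0*(b - a) == a
	 * _PIPE(1, a, b) == a + 1*(b - a) == b
	 * _TRANSCODER() and _PORT() follow the same pattern. */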
@@ -73,7 +72,8 @@
 #define   I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
 #define   I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
 #define   I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
-#define LBB    0xf4
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
+
 
 /* Graphics reset regs */
 #define I965_GDRST 0xc0 /* PCI config register */
 #define VGA_CR_INDEX_CGA 0x3d4
 #define VGA_CR_DATA_CGA 0x3d5
 
+/*
+ * Instruction field definitions used by the command parser
+ */
+#define INSTR_CLIENT_SHIFT      29
+#define INSTR_CLIENT_MASK       0xE0000000
+#define   INSTR_MI_CLIENT       0x0
+#define   INSTR_BC_CLIENT       0x2
+#define   INSTR_RC_CLIENT       0x3
+#define INSTR_SUBCLIENT_SHIFT   27
+#define INSTR_SUBCLIENT_MASK    0x18000000
+#define   INSTR_MEDIA_SUBCLIENT 0x2
+
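A decode sketch (an annotation; "cmd" is a hypothetical u32 command DWORD):

	/* Sketch: classify a command by its client field (bits 31:29) and,
	 * for render commands, its subclient field (bits 28:27). */
	u32 client    = (cmd & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
	u32 subclient = (cmd & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

	if (client == INSTR_RC_CLIENT && subclient == INSTR_MEDIA_SUBCLIENT)
		; /* e.g. a render-client media command */
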
 /*
  * Memory interface instructions used by the kernel
  */
 #define   DSPFREQSTAT_MASK                     (0x3 << DSPFREQSTAT_SHIFT)
 #define   DSPFREQGUAR_SHIFT                    14
 #define   DSPFREQGUAR_MASK                     (0x3 << DSPFREQGUAR_SHIFT)
+
+/* See the PUNIT HAS v0.8 for the below bits */
+enum punit_power_well {
+       PUNIT_POWER_WELL_RENDER                 = 0,
+       PUNIT_POWER_WELL_MEDIA                  = 1,
+       PUNIT_POWER_WELL_DISP2D                 = 3,
+       PUNIT_POWER_WELL_DPIO_CMN_BC            = 5,
+       PUNIT_POWER_WELL_DPIO_TX_B_LANES_01     = 6,
+       PUNIT_POWER_WELL_DPIO_TX_B_LANES_23     = 7,
+       PUNIT_POWER_WELL_DPIO_TX_C_LANES_01     = 8,
+       PUNIT_POWER_WELL_DPIO_TX_C_LANES_23     = 9,
+       PUNIT_POWER_WELL_DPIO_RX0               = 10,
+       PUNIT_POWER_WELL_DPIO_RX1               = 11,
+
+       PUNIT_POWER_WELL_NUM,
+};
+
 #define PUNIT_REG_PWRGT_CTRL                   0x60
 #define PUNIT_REG_PWRGT_STATUS                 0x61
-#define          PUNIT_CLK_GATE                        1
-#define          PUNIT_PWR_RESET                       2
-#define          PUNIT_PWR_GATE                        3
-#define          RENDER_PWRGT                          (PUNIT_PWR_GATE << 0)
-#define          MEDIA_PWRGT                           (PUNIT_PWR_GATE << 2)
-#define          DISP2D_PWRGT                          (PUNIT_PWR_GATE << 6)
+#define   PUNIT_PWRGT_MASK(power_well)         (3 << ((power_well) * 2))
+#define   PUNIT_PWRGT_PWR_ON(power_well)       (0 << ((power_well) * 2))
+#define   PUNIT_PWRGT_CLK_GATE(power_well)     (1 << ((power_well) * 2))
+#define   PUNIT_PWRGT_RESET(power_well)                (2 << ((power_well) * 2))
+#define   PUNIT_PWRGT_PWR_GATE(power_well)     (3 << ((power_well) * 2))
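
Worked example (annotation): each well owns a 2-bit field, so for the 2D
display well (PUNIT_POWER_WELL_DISP2D == 3):

	PUNIT_PWRGT_MASK(3)     == 3 << 6 == 0xc0
	PUNIT_PWRGT_PWR_GATE(3) == 3 << 6 == 0xc0
	PUNIT_PWRGT_PWR_ON(3)   == 0 << 6 == 0x00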
 
 #define PUNIT_REG_GPU_LFM                      0xd3
 #define PUNIT_REG_GPU_FREQ_REQ                 0xd4
 #define _3D_CHICKEN3   0x02090
 #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL            (1 << 10)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL         (1 << 5)
-#define  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x)      ((x)<<1)
+#define  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x)      ((x)<<1) /* gen8+ */
+#define  _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH  (1 << 1) /* gen6 */
 
 #define MI_MODE                0x0209c
 # define VS_TIMER_DISPATCH                             (1 << 6)
 # define ASYNC_FLIP_PERF_DISABLE                       (1 << 14)
 
 #define GEN6_GT_MODE   0x20d0
-#define   GEN6_GT_MODE_HI                              (1 << 9)
+#define GEN7_GT_MODE   0x7008
+#define   GEN6_WIZ_HASHING(hi, lo)                     (((hi) << 9) | ((lo) << 7))
+#define   GEN6_WIZ_HASHING_8x8                         GEN6_WIZ_HASHING(0, 0)
+#define   GEN6_WIZ_HASHING_8x4                         GEN6_WIZ_HASHING(0, 1)
+#define   GEN6_WIZ_HASHING_16x4                                GEN6_WIZ_HASHING(1, 0)
+#define   GEN6_WIZ_HASHING_MASK                                (GEN6_WIZ_HASHING(1, 1) << 16)
 #define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE            (1 << 5)
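
Worked example (annotation; assumes GT_MODE takes the usual masked writes,
which the <<16 mask suggests): selecting 16x4 WiZ hashing pairs the value
with its write-enable bits:

	GEN6_WIZ_HASHING_16x4 == (1 << 9) | (0 << 7) == 0x200
	GEN6_WIZ_HASHING_MASK == ((1 << 9) | (1 << 7)) << 16 == 0x2800000
	/* writing GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4 updates
	 * only the two hashing bits. */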
 
 #define GFX_MODE       0x02520
 #define   ECO_GATING_CX_ONLY   (1<<3)
 #define   ECO_FLIP_DONE                (1<<0)
 
+#define CACHE_MODE_0_GEN7      0x7000 /* IVB+ */
+#define   HIZ_RAW_STALL_OPT_DISABLE (1<<2)
 #define CACHE_MODE_1           0x7004 /* IVB+ */
 #define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
 
 #define   GEN6_BLITTER_LOCK_SHIFT                      16
 #define   GEN6_BLITTER_FBC_NOTIFY                      (1<<3)
 
+#define GEN6_RC_SLEEP_PSMI_CONTROL     0x2050
+#define   GEN8_RC_SEMA_IDLE_MSG_DISABLE        (1 << 12)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL    0x12050
 #define   GEN6_BSD_SLEEP_MSG_DISABLE   (1 << 0)
 #define   GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
 #define   FBC_CTL_IDLE_LINE    (2<<2)
 #define   FBC_CTL_IDLE_DEBUG   (3<<2)
 #define   FBC_CTL_CPU_FENCE    (1<<1)
-#define   FBC_CTL_PLANEA       (0<<0)
-#define   FBC_CTL_PLANEB       (1<<0)
-#define FBC_FENCE_OFF          0x0321b
+#define   FBC_CTL_PLANE(plane) ((plane)<<0)
+#define FBC_FENCE_OFF          0x03218 /* BSpec typo has 321Bh */
 #define FBC_TAG                        0x03300
 
 #define FBC_LL_SIZE            (1536)
 #define DPFC_CB_BASE           0x3200
 #define DPFC_CONTROL           0x3208
 #define   DPFC_CTL_EN          (1<<31)
-#define   DPFC_CTL_PLANEA      (0<<30)
-#define   DPFC_CTL_PLANEB      (1<<30)
-#define   IVB_DPFC_CTL_PLANE_SHIFT     (29)
+#define   DPFC_CTL_PLANE(plane)        ((plane)<<30)
+#define   IVB_DPFC_CTL_PLANE(plane)    ((plane)<<29)
 #define   DPFC_CTL_FENCE_EN    (1<<29)
 #define   IVB_DPFC_CTL_FENCE_EN        (1<<28)
 #define   DPFC_CTL_PERSISTENT_MODE     (1<<25)
 #define   FBC_REND_NUKE                (1<<2)
 #define   FBC_REND_CACHE_CLEAN (1<<1)
 
-#define _HSW_PIPE_SLICE_CHICKEN_1_A    0x420B0
-#define _HSW_PIPE_SLICE_CHICKEN_1_B    0x420B4
-#define   HSW_BYPASS_FBC_QUEUE         (1<<22)
-#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, + \
-                                            _HSW_PIPE_SLICE_CHICKEN_1_A, + \
-                                            _HSW_PIPE_SLICE_CHICKEN_1_B)
-
 /*
  * GPIO regs
  */
 /*
  * Clock control & power management
  */
+#define DPLL_A_OFFSET 0x6014
+#define DPLL_B_OFFSET 0x6018
+#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
+                   dev_priv->info.display_mmio_offset)
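
For example (annotation, using the offsets above): on a platform with the
legacy layout, DPLL(PIPE_B) resolves to dpll_offsets[PIPE_B] +
display_mmio_offset, i.e. 0x6018 plus the per-platform MMIO shift.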
 
 #define VGA0   0x6000
 #define VGA1   0x6004
 #define   VGA1_PD_P1_DIV_2     (1 << 13)
 #define   VGA1_PD_P1_SHIFT     8
 #define   VGA1_PD_P1_MASK      (0x1f << 8)
-#define _DPLL_A        (dev_priv->info->display_mmio_offset + 0x6014)
-#define _DPLL_B        (dev_priv->info->display_mmio_offset + 0x6018)
-#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
 #define   DPLL_VCO_ENABLE              (1 << 31)
 #define   DPLL_SDVO_HIGH_SPEED         (1 << 30)
 #define   DPLL_DVO_2X_MODE             (1 << 30)
 #define   SDVO_MULTIPLIER_MASK                 0x000000ff
 #define   SDVO_MULTIPLIER_SHIFT_HIRES          4
 #define   SDVO_MULTIPLIER_SHIFT_VGA            0
-#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
+
+#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
+#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
+#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
+                      dev_priv->info.display_mmio_offset)
+
 /*
  * UDI pixel divider, controlling how many pixels are stuffed into a packet.
  *
  */
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK      0x0000003f
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT     0
-#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
-#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
 
 #define _FPA0  0x06040
 #define _FPA1  0x06044
 #define  DSTATE_PLL_D3_OFF                     (1<<3)
 #define  DSTATE_GFX_CLOCK_GATING               (1<<1)
 #define  DSTATE_DOT_CLOCK_GATING               (1<<0)
-#define DSPCLK_GATE_D  (dev_priv->info->display_mmio_offset + 0x6200)
+#define DSPCLK_GATE_D  (dev_priv->info.display_mmio_offset + 0x6200)
 # define DPUNIT_B_CLOCK_GATE_DISABLE           (1 << 30) /* 965 */
 # define VSUNIT_CLOCK_GATE_DISABLE             (1 << 29) /* 965 */
 # define VRHUNIT_CLOCK_GATE_DISABLE            (1 << 28) /* 965 */
 /*
  * Palette regs
  */
-
-#define _PALETTE_A             (dev_priv->info->display_mmio_offset + 0xa000)
-#define _PALETTE_B             (dev_priv->info->display_mmio_offset + 0xa800)
-#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
+#define PALETTE_A_OFFSET 0xa000
+#define PALETTE_B_OFFSET 0xa800
+#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \
+                      dev_priv->info.display_mmio_offset)
 
 /* MCH MMIO space */
 
  */
 
 /* Pipe A CRC regs */
-#define _PIPE_CRC_CTL_A                (dev_priv->info->display_mmio_offset + 0x60050)
+#define _PIPE_CRC_CTL_A                        0x60050
 #define   PIPE_CRC_ENABLE              (1 << 31)
 /* ivb+ source selection */
 #define   PIPE_CRC_SOURCE_PRIMARY_IVB  (0 << 29)
 #define _PIPE_CRC_RES_4_A_IVB          0x60070
 #define _PIPE_CRC_RES_5_A_IVB          0x60074
 
-#define _PIPE_CRC_RES_RED_A            (dev_priv->info->display_mmio_offset + 0x60060)
-#define _PIPE_CRC_RES_GREEN_A          (dev_priv->info->display_mmio_offset + 0x60064)
-#define _PIPE_CRC_RES_BLUE_A           (dev_priv->info->display_mmio_offset + 0x60068)
-#define _PIPE_CRC_RES_RES1_A_I915      (dev_priv->info->display_mmio_offset + 0x6006c)
-#define _PIPE_CRC_RES_RES2_A_G4X       (dev_priv->info->display_mmio_offset + 0x60080)
+#define _PIPE_CRC_RES_RED_A            0x60060
+#define _PIPE_CRC_RES_GREEN_A          0x60064
+#define _PIPE_CRC_RES_BLUE_A           0x60068
+#define _PIPE_CRC_RES_RES1_A_I915      0x6006c
+#define _PIPE_CRC_RES_RES2_A_G4X       0x60080
 
 /* Pipe B CRC regs */
 #define _PIPE_CRC_RES_1_B_IVB          0x61064
 #define _PIPE_CRC_RES_4_B_IVB          0x61070
 #define _PIPE_CRC_RES_5_B_IVB          0x61074
 
-#define PIPE_CRC_CTL(pipe)     _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
+#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A)
 #define PIPE_CRC_RES_1_IVB(pipe)       \
-       _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
+       _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB)
 #define PIPE_CRC_RES_2_IVB(pipe)       \
-       _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
+       _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB)
 #define PIPE_CRC_RES_3_IVB(pipe)       \
-       _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
+       _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB)
 #define PIPE_CRC_RES_4_IVB(pipe)       \
-       _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
+       _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB)
 #define PIPE_CRC_RES_5_IVB(pipe)       \
-       _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
+       _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB)
 
 #define PIPE_CRC_RES_RED(pipe) \
-       _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
+       _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A)
 #define PIPE_CRC_RES_GREEN(pipe) \
-       _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
+       _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A)
 #define PIPE_CRC_RES_BLUE(pipe) \
-       _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
+       _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A)
 #define PIPE_CRC_RES_RES1_I915(pipe) \
-       _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
+       _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915)
 #define PIPE_CRC_RES_RES2_G4X(pipe) \
-       _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
+       _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
 
 /* Pipe A timing regs */
-#define _HTOTAL_A      (dev_priv->info->display_mmio_offset + 0x60000)
-#define _HBLANK_A      (dev_priv->info->display_mmio_offset + 0x60004)
-#define _HSYNC_A       (dev_priv->info->display_mmio_offset + 0x60008)
-#define _VTOTAL_A      (dev_priv->info->display_mmio_offset + 0x6000c)
-#define _VBLANK_A      (dev_priv->info->display_mmio_offset + 0x60010)
-#define _VSYNC_A       (dev_priv->info->display_mmio_offset + 0x60014)
-#define _PIPEASRC      (dev_priv->info->display_mmio_offset + 0x6001c)
-#define _BCLRPAT_A     (dev_priv->info->display_mmio_offset + 0x60020)
-#define _VSYNCSHIFT_A  (dev_priv->info->display_mmio_offset + 0x60028)
+#define _HTOTAL_A      0x60000
+#define _HBLANK_A      0x60004
+#define _HSYNC_A       0x60008
+#define _VTOTAL_A      0x6000c
+#define _VBLANK_A      0x60010
+#define _VSYNC_A       0x60014
+#define _PIPEASRC      0x6001c
+#define _BCLRPAT_A     0x60020
+#define _VSYNCSHIFT_A  0x60028
 
 /* Pipe B timing regs */
-#define _HTOTAL_B      (dev_priv->info->display_mmio_offset + 0x61000)
-#define _HBLANK_B      (dev_priv->info->display_mmio_offset + 0x61004)
-#define _HSYNC_B       (dev_priv->info->display_mmio_offset + 0x61008)
-#define _VTOTAL_B      (dev_priv->info->display_mmio_offset + 0x6100c)
-#define _VBLANK_B      (dev_priv->info->display_mmio_offset + 0x61010)
-#define _VSYNC_B       (dev_priv->info->display_mmio_offset + 0x61014)
-#define _PIPEBSRC      (dev_priv->info->display_mmio_offset + 0x6101c)
-#define _BCLRPAT_B     (dev_priv->info->display_mmio_offset + 0x61020)
-#define _VSYNCSHIFT_B  (dev_priv->info->display_mmio_offset + 0x61028)
-
-#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
-#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
-#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
-#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define _HTOTAL_B      0x61000
+#define _HBLANK_B      0x61004
+#define _HSYNC_B       0x61008
+#define _VTOTAL_B      0x6100c
+#define _VBLANK_B      0x61010
+#define _VSYNC_B       0x61014
+#define _PIPEBSRC      0x6101c
+#define _BCLRPAT_B     0x61020
+#define _VSYNCSHIFT_B  0x61028
+
+#define TRANSCODER_A_OFFSET 0x60000
+#define TRANSCODER_B_OFFSET 0x61000
+#define TRANSCODER_C_OFFSET 0x62000
+#define TRANSCODER_EDP_OFFSET 0x6f000
+
+#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \
+       dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+       dev_priv->info.display_mmio_offset)
+
+#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A)
+#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A)
+#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A)
+#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A)
+#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A)
+#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A)
+#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
+#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
+#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
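
Worked example (annotation), using the offsets above:

	HTOTAL(TRANSCODER_B)
	  == trans_offsets[TRANSCODER_B] - trans_offsets[TRANSCODER_A]
	     + _HTOTAL_A + display_mmio_offset
	  == 0x61000 - 0x60000 + 0x60000 + display_mmio_offset
	  == 0x61000 + display_mmio_offset

i.e. the old _HTOTAL_B address, now derived from one per-device table that
can also describe transcoder C and the eDP transcoder.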
 
 /* HSW+ eDP PSR registers */
 #define EDP_PSR_BASE(dev)                       (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
 
 
 /* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN                (dev_priv->info->display_mmio_offset + 0x61110)
+#define PORT_HOTPLUG_EN                (dev_priv->info.display_mmio_offset + 0x61110)
 #define   PORTB_HOTPLUG_INT_EN                 (1 << 29)
 #define   PORTC_HOTPLUG_INT_EN                 (1 << 28)
 #define   PORTD_HOTPLUG_INT_EN                 (1 << 27)
 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV       (0 << 2)
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV       (1 << 2)
 
-#define PORT_HOTPLUG_STAT      (dev_priv->info->display_mmio_offset + 0x61114)
+#define PORT_HOTPLUG_STAT      (dev_priv->info.display_mmio_offset + 0x61114)
 /*
  * HDMI/DP bits are gen4+
  *
 #define VIDEO_DIP_CTL          0x61170
 /* Pre HSW: */
 #define   VIDEO_DIP_ENABLE             (1 << 31)
-#define   VIDEO_DIP_PORT_B             (1 << 29)
-#define   VIDEO_DIP_PORT_C             (2 << 29)
-#define   VIDEO_DIP_PORT_D             (3 << 29)
+#define   VIDEO_DIP_PORT(port)         ((port) << 29)
 #define   VIDEO_DIP_PORT_MASK          (3 << 29)
 #define   VIDEO_DIP_ENABLE_GCP         (1 << 25)
 #define   VIDEO_DIP_ENABLE_AVI         (1 << 21)
 #define PP_DIVISOR     0x61210
 
 /* Panel fitting */
-#define PFIT_CONTROL   (dev_priv->info->display_mmio_offset + 0x61230)
+#define PFIT_CONTROL   (dev_priv->info.display_mmio_offset + 0x61230)
 #define   PFIT_ENABLE          (1 << 31)
 #define   PFIT_PIPE_MASK       (3 << 29)
 #define   PFIT_PIPE_SHIFT      29
 #define   PFIT_SCALING_PROGRAMMED (1 << 26)
 #define   PFIT_SCALING_PILLAR  (2 << 26)
 #define   PFIT_SCALING_LETTER  (3 << 26)
-#define PFIT_PGM_RATIOS        (dev_priv->info->display_mmio_offset + 0x61234)
+#define PFIT_PGM_RATIOS        (dev_priv->info.display_mmio_offset + 0x61234)
 /* Pre-965 */
 #define                PFIT_VERT_SCALE_SHIFT           20
 #define                PFIT_VERT_SCALE_MASK            0xfff00000
 #define                PFIT_HORIZ_SCALE_SHIFT_965      0
 #define                PFIT_HORIZ_SCALE_MASK_965       0x00001fff
 
-#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
+#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238)
 
-#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
+#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
 #define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
                                     _VLV_BLC_PWM_CTL2_B)
 
-#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
+#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
 #define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
                                    _VLV_BLC_PWM_CTL_B)
 
-#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
+#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
 #define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
                                     _VLV_BLC_HIST_CTL_B)
 
 /* Backlight control */
-#define BLC_PWM_CTL2   (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2   (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
 #define   BLM_PWM_ENABLE               (1 << 31)
 #define   BLM_COMBINATION_MODE         (1 << 30) /* gen4 only */
 #define   BLM_PIPE_SELECT              (1 << 29)
 #define   BLM_PHASE_IN_COUNT_MASK      (0xff << 8)
 #define   BLM_PHASE_IN_INCR_SHIFT      (0)
 #define   BLM_PHASE_IN_INCR_MASK       (0xff << 0)
-#define BLC_PWM_CTL    (dev_priv->info->display_mmio_offset + 0x61254)
+#define BLC_PWM_CTL    (dev_priv->info.display_mmio_offset + 0x61254)
 /*
  * This is the most significant 15 bits of the number of backlight cycles in a
  * complete cycle of the modulated backlight control.
 #define   BACKLIGHT_DUTY_CYCLE_MASK_PNV                (0xfffe)
 #define   BLM_POLARITY_PNV                     (1 << 0) /* pnv only */
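
A minimal sketch of the layout the comment above describes; the >>17 shift and the low-word duty field are assumptions drawn from that comment and from BACKLIGHT_DUTY_CYCLE_MASK_PNV, not facts stated by this patch:

	/* Hypothetical helper: split BLC_PWM_CTL into its two fields. */
	static inline void blc_pwm_decode(u32 ctl, u32 *max_cycles, u32 *duty)
	{
		*max_cycles = ctl >> 17;	/* top 15 bits: cycles per PWM period */
		*duty = ctl & 0xffff;		/* low word: duty cycle; on PNV mask
						 * with 0xfffe, bit 0 is BLM_POLARITY_PNV */
	}
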
 
-#define BLC_HIST_CTL   (dev_priv->info->display_mmio_offset + 0x61260)
+#define BLC_HIST_CTL   (dev_priv->info.display_mmio_offset + 0x61260)
 
 /* New registers for PCH-split platforms. Safe where new bits show up, the
  * register layout matches the gen4 BLC_PWM_CTL[12]. */
 /* Display & cursor control */
 
 /* Pipe A */
-#define _PIPEADSL              (dev_priv->info->display_mmio_offset + 0x70000)
+#define _PIPEADSL              0x70000
 #define   DSL_LINEMASK_GEN2    0x00000fff
 #define   DSL_LINEMASK_GEN3    0x00001fff
-#define _PIPEACONF             (dev_priv->info->display_mmio_offset + 0x70008)
+#define _PIPEACONF             0x70008
 #define   PIPECONF_ENABLE      (1<<31)
 #define   PIPECONF_DISABLE     0
 #define   PIPECONF_DOUBLE_WIDE (1<<30)
 #define   PIPECONF_DITHER_TYPE_ST1 (1<<2)
 #define   PIPECONF_DITHER_TYPE_ST2 (2<<2)
 #define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
-#define _PIPEASTAT             (dev_priv->info->display_mmio_offset + 0x70024)
+#define _PIPEASTAT             0x70024
 #define   PIPE_FIFO_UNDERRUN_STATUS            (1UL<<31)
-#define   SPRITE1_FLIPDONE_INT_EN_VLV          (1UL<<30)
+#define   SPRITE1_FLIP_DONE_INT_EN_VLV         (1UL<<30)
 #define   PIPE_CRC_ERROR_ENABLE                        (1UL<<29)
 #define   PIPE_CRC_DONE_ENABLE                 (1UL<<28)
 #define   PIPE_GMBUS_EVENT_ENABLE              (1UL<<27)
 #define   PIPE_LEGACY_BLC_EVENT_ENABLE         (1UL<<22)
 #define   PIPE_ODD_FIELD_INTERRUPT_ENABLE      (1UL<<21)
 #define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE     (1UL<<20)
+#define   PIPE_B_PSR_INTERRUPT_ENABLE_VLV      (1UL<<19)
 #define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE     (1UL<<18) /* pre-965 */
 #define   PIPE_START_VBLANK_INTERRUPT_ENABLE   (1UL<<18) /* 965 or later */
 #define   PIPE_VBLANK_INTERRUPT_ENABLE         (1UL<<17)
 #define   PIPEA_HBLANK_INT_EN_VLV              (1UL<<16)
 #define   PIPE_OVERLAY_UPDATED_ENABLE          (1UL<<16)
-#define   SPRITE1_FLIPDONE_INT_STATUS_VLV      (1UL<<15)
-#define   SPRITE0_FLIPDONE_INT_STATUS_VLV      (1UL<<14)
+#define   SPRITE1_FLIP_DONE_INT_STATUS_VLV     (1UL<<15)
+#define   SPRITE0_FLIP_DONE_INT_STATUS_VLV     (1UL<<14)
 #define   PIPE_CRC_ERROR_INTERRUPT_STATUS      (1UL<<13)
 #define   PIPE_CRC_DONE_INTERRUPT_STATUS       (1UL<<12)
 #define   PIPE_GMBUS_INTERRUPT_STATUS          (1UL<<11)
-#define   PLANE_FLIPDONE_INT_STATUS_VLV                (1UL<<10)
+#define   PLANE_FLIP_DONE_INT_STATUS_VLV       (1UL<<10)
 #define   PIPE_HOTPLUG_INTERRUPT_STATUS                (1UL<<10)
 #define   PIPE_VSYNC_INTERRUPT_STATUS          (1UL<<9)
 #define   PIPE_DISPLAY_LINE_COMPARE_STATUS     (1UL<<8)
 #define   PIPE_DPST_EVENT_STATUS               (1UL<<7)
 #define   PIPE_LEGACY_BLC_EVENT_STATUS         (1UL<<6)
+#define   PIPE_A_PSR_STATUS_VLV                        (1UL<<6)
 #define   PIPE_ODD_FIELD_INTERRUPT_STATUS      (1UL<<5)
 #define   PIPE_EVEN_FIELD_INTERRUPT_STATUS     (1UL<<4)
+#define   PIPE_B_PSR_STATUS_VLV                        (1UL<<3)
 #define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS     (1UL<<2) /* pre-965 */
 #define   PIPE_START_VBLANK_INTERRUPT_STATUS   (1UL<<2) /* 965 or later */
 #define   PIPE_VBLANK_INTERRUPT_STATUS         (1UL<<1)
 #define   PIPE_OVERLAY_UPDATED_STATUS          (1UL<<0)
 
-#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
-#define PIPEDSL(pipe)  _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
-#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
-#define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
-#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+#define PIPESTAT_INT_ENABLE_MASK               0x7fff0000
+#define PIPESTAT_INT_STATUS_MASK               0x0000ffff
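
A hedged sketch of how these two new masks pair up: the top half of the register holds the enable bits, the bottom half the sticky status bits. The write-one-to-clear behaviour of the status half is an assumption here, not something this hunk states:

	/* Ack pending PIPESTAT events without disturbing the enables,
	 * using the PIPESTAT(pipe) macro defined just below. */
	u32 val = I915_READ(PIPESTAT(pipe));
	u32 enables = val & PIPESTAT_INT_ENABLE_MASK;	/* bits 30:16 */
	u32 status  = val & PIPESTAT_INT_STATUS_MASK;	/* bits 15:0 */
	I915_WRITE(PIPESTAT(pipe), enables | status);	/* clear status */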
+
+#define PIPE_A_OFFSET  0x70000
+#define PIPE_B_OFFSET  0x71000
+#define PIPE_C_OFFSET  0x72000
+/*
+ * There's actually no pipe EDP. Some pipe registers have
+ * simply shifted from the pipe to the transcoder, while
+ * keeping their original offset. Thus we need PIPE_EDP_OFFSET
+ * to access such registers in transcoder EDP.
+ */
+#define PIPE_EDP_OFFSET        0x7f000
+
+#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \
+       dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+       dev_priv->info.display_mmio_offset)
+
+#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF)
+#define PIPEDSL(pipe)  _PIPE2(pipe, _PIPEADSL)
+#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe)  _PIPE2(pipe, _PIPEAFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT)
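
To make the new arithmetic concrete, a worked example using the pipe offsets defined just above (display_mmio_offset left symbolic):

	/*
	 * PIPECONF(PIPE_B)
	 *   = pipe_offsets[PIPE_B] - pipe_offsets[PIPE_A] + _PIPEACONF
	 *     + display_mmio_offset
	 *   = 0x71000 - 0x70000 + 0x70008 + display_mmio_offset
	 *   = display_mmio_offset + 0x71008, i.e. the old _PIPEBCONF address.
	 */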
 
 #define _PIPE_MISC_A                   0x70030
 #define _PIPE_MISC_B                   0x71030
 #define   PIPEMISC_DITHER_ENABLE       (1<<4)
 #define   PIPEMISC_DITHER_TYPE_MASK    (3<<2)
 #define   PIPEMISC_DITHER_TYPE_SP      (0<<2)
-#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
+#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A)
 
 #define VLV_DPFLIPSTAT                         (VLV_DISPLAY_BASE + 0x70028)
 #define   PIPEB_LINE_COMPARE_INT_EN            (1<<29)
 #define   PIPEB_HLINE_INT_EN                   (1<<28)
 #define   PIPEB_VBLANK_INT_EN                  (1<<27)
-#define   SPRITED_FLIPDONE_INT_EN              (1<<26)
-#define   SPRITEC_FLIPDONE_INT_EN              (1<<25)
-#define   PLANEB_FLIPDONE_INT_EN               (1<<24)
+#define   SPRITED_FLIP_DONE_INT_EN             (1<<26)
+#define   SPRITEC_FLIP_DONE_INT_EN             (1<<25)
+#define   PLANEB_FLIP_DONE_INT_EN              (1<<24)
 #define   PIPEA_LINE_COMPARE_INT_EN            (1<<21)
 #define   PIPEA_HLINE_INT_EN                   (1<<20)
 #define   PIPEA_VBLANK_INT_EN                  (1<<19)
-#define   SPRITEB_FLIPDONE_INT_EN              (1<<18)
-#define   SPRITEA_FLIPDONE_INT_EN              (1<<17)
+#define   SPRITEB_FLIP_DONE_INT_EN             (1<<18)
+#define   SPRITEA_FLIP_DONE_INT_EN             (1<<17)
 #define   PLANEA_FLIPDONE_INT_EN               (1<<16)
 
 #define DPINVGTT                               (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
 #define   DSPARB_BEND_SHIFT    9 /* on 855 */
 #define   DSPARB_AEND_SHIFT    0
 
-#define DSPFW1                 (dev_priv->info->display_mmio_offset + 0x70034)
+#define DSPFW1                 (dev_priv->info.display_mmio_offset + 0x70034)
 #define   DSPFW_SR_SHIFT       23
 #define   DSPFW_SR_MASK                (0x1ff<<23)
 #define   DSPFW_CURSORB_SHIFT  16
 #define   DSPFW_PLANEB_SHIFT   8
 #define   DSPFW_PLANEB_MASK    (0x7f<<8)
 #define   DSPFW_PLANEA_MASK    (0x7f)
-#define DSPFW2                 (dev_priv->info->display_mmio_offset + 0x70038)
+#define DSPFW2                 (dev_priv->info.display_mmio_offset + 0x70038)
 #define   DSPFW_CURSORA_MASK   0x00003f00
 #define   DSPFW_CURSORA_SHIFT  8
 #define   DSPFW_PLANEC_MASK    (0x7f)
-#define DSPFW3                 (dev_priv->info->display_mmio_offset + 0x7003c)
+#define DSPFW3                 (dev_priv->info.display_mmio_offset + 0x7003c)
 #define   DSPFW_HPLL_SR_EN     (1<<31)
 #define   DSPFW_CURSOR_SR_SHIFT        24
 #define   PINEVIEW_SELF_REFRESH_EN     (1<<30)
 #define   DSPFW_HPLL_CURSOR_SHIFT      16
 #define   DSPFW_HPLL_CURSOR_MASK       (0x3f<<16)
 #define   DSPFW_HPLL_SR_MASK           (0x1ff)
-#define DSPFW4                 (dev_priv->info->display_mmio_offset + 0x70070)
-#define DSPFW7                 (dev_priv->info->display_mmio_offset + 0x7007c)
+#define DSPFW4                 (dev_priv->info.display_mmio_offset + 0x70070)
+#define DSPFW7                 (dev_priv->info.display_mmio_offset + 0x7007c)
 
 /* drain latency register values */
 #define DRAIN_LATENCY_PRECISION_32     32
 #define   PIPE_PIXEL_MASK         0x00ffffff
 #define   PIPE_PIXEL_SHIFT        0
 /* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_GM45   (dev_priv->info->display_mmio_offset + 0x70040)
-#define _PIPEA_FLIPCOUNT_GM45  (dev_priv->info->display_mmio_offset + 0x70044)
+#define _PIPEA_FRMCOUNT_GM45   (dev_priv->info.display_mmio_offset + 0x70040)
+#define _PIPEA_FLIPCOUNT_GM45  (dev_priv->info.display_mmio_offset + 0x70044)
 #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
 
 /* Cursor A & B regs */
-#define _CURACNTR              (dev_priv->info->display_mmio_offset + 0x70080)
+#define _CURACNTR              (dev_priv->info.display_mmio_offset + 0x70080)
 /* Old style CUR*CNTR flags (desktop 8xx) */
 #define   CURSOR_ENABLE                0x80000000
 #define   CURSOR_GAMMA_ENABLE  0x40000000
 #define   MCURSOR_PIPE_B       (1 << 28)
 #define   MCURSOR_GAMMA_ENABLE  (1 << 26)
 #define   CURSOR_TRICKLE_FEED_DISABLE  (1 << 14)
-#define _CURABASE              (dev_priv->info->display_mmio_offset + 0x70084)
-#define _CURAPOS               (dev_priv->info->display_mmio_offset + 0x70088)
+#define _CURABASE              (dev_priv->info.display_mmio_offset + 0x70084)
+#define _CURAPOS               (dev_priv->info.display_mmio_offset + 0x70088)
 #define   CURSOR_POS_MASK       0x007FF
 #define   CURSOR_POS_SIGN       0x8000
 #define   CURSOR_X_SHIFT        0
 #define   CURSOR_Y_SHIFT        16
 #define CURSIZE                        0x700a0
-#define _CURBCNTR              (dev_priv->info->display_mmio_offset + 0x700c0)
-#define _CURBBASE              (dev_priv->info->display_mmio_offset + 0x700c4)
-#define _CURBPOS               (dev_priv->info->display_mmio_offset + 0x700c8)
+#define _CURBCNTR              (dev_priv->info.display_mmio_offset + 0x700c0)
+#define _CURBBASE              (dev_priv->info.display_mmio_offset + 0x700c4)
+#define _CURBPOS               (dev_priv->info.display_mmio_offset + 0x700c8)
 
 #define _CURBCNTR_IVB          0x71080
 #define _CURBBASE_IVB          0x71084
 #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
 
 /* Display A control */
-#define _DSPACNTR                (dev_priv->info->display_mmio_offset + 0x70180)
+#define _DSPACNTR                              0x70180
 #define   DISPLAY_PLANE_ENABLE                 (1<<31)
 #define   DISPLAY_PLANE_DISABLE                        0
 #define   DISPPLANE_GAMMA_ENABLE               (1<<30)
 #define   DISPPLANE_STEREO_POLARITY_SECOND     (1<<18)
 #define   DISPPLANE_TRICKLE_FEED_DISABLE       (1<<14) /* Ironlake */
 #define   DISPPLANE_TILED                      (1<<10)
-#define _DSPAADDR              (dev_priv->info->display_mmio_offset + 0x70184)
-#define _DSPASTRIDE            (dev_priv->info->display_mmio_offset + 0x70188)
-#define _DSPAPOS               (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
-#define _DSPASIZE              (dev_priv->info->display_mmio_offset + 0x70190)
-#define _DSPASURF              (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
-#define _DSPATILEOFF           (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
-#define _DSPAOFFSET            (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
-#define _DSPASURFLIVE          (dev_priv->info->display_mmio_offset + 0x701AC)
-
-#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
-#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
-#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
-#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
-#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
-#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
-#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+#define _DSPAADDR                              0x70184
+#define _DSPASTRIDE                            0x70188
+#define _DSPAPOS                               0x7018C /* reserved */
+#define _DSPASIZE                              0x70190
+#define _DSPASURF                              0x7019C /* 965+ only */
+#define _DSPATILEOFF                           0x701A4 /* 965+ only */
+#define _DSPAOFFSET                            0x701A4 /* HSW */
+#define _DSPASURFLIVE                          0x701AC
+
+#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR)
+#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR)
+#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE)
+#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS)
+#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE)
+#define DSPSURF(plane) _PIPE2(plane, _DSPASURF)
+#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF)
 #define DSPLINOFF(plane) DSPADDR(plane)
-#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
-#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
+#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
+#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
 
 /* Display/Sprite base address macros */
 #define DISP_BASEADDR_MASK     (0xfffff000)
 #define I915_HI_DISPBASE(val)  (val & DISP_BASEADDR_MASK)
 
 /* VBIOS flags */
-#define SWF00                  (dev_priv->info->display_mmio_offset + 0x71410)
-#define SWF01                  (dev_priv->info->display_mmio_offset + 0x71414)
-#define SWF02                  (dev_priv->info->display_mmio_offset + 0x71418)
-#define SWF03                  (dev_priv->info->display_mmio_offset + 0x7141c)
-#define SWF04                  (dev_priv->info->display_mmio_offset + 0x71420)
-#define SWF05                  (dev_priv->info->display_mmio_offset + 0x71424)
-#define SWF06                  (dev_priv->info->display_mmio_offset + 0x71428)
-#define SWF10                  (dev_priv->info->display_mmio_offset + 0x70410)
-#define SWF11                  (dev_priv->info->display_mmio_offset + 0x70414)
-#define SWF14                  (dev_priv->info->display_mmio_offset + 0x71420)
-#define SWF30                  (dev_priv->info->display_mmio_offset + 0x72414)
-#define SWF31                  (dev_priv->info->display_mmio_offset + 0x72418)
-#define SWF32                  (dev_priv->info->display_mmio_offset + 0x7241c)
+#define SWF00                  (dev_priv->info.display_mmio_offset + 0x71410)
+#define SWF01                  (dev_priv->info.display_mmio_offset + 0x71414)
+#define SWF02                  (dev_priv->info.display_mmio_offset + 0x71418)
+#define SWF03                  (dev_priv->info.display_mmio_offset + 0x7141c)
+#define SWF04                  (dev_priv->info.display_mmio_offset + 0x71420)
+#define SWF05                  (dev_priv->info.display_mmio_offset + 0x71424)
+#define SWF06                  (dev_priv->info.display_mmio_offset + 0x71428)
+#define SWF10                  (dev_priv->info.display_mmio_offset + 0x70410)
+#define SWF11                  (dev_priv->info.display_mmio_offset + 0x70414)
+#define SWF14                  (dev_priv->info.display_mmio_offset + 0x71420)
+#define SWF30                  (dev_priv->info.display_mmio_offset + 0x72414)
+#define SWF31                  (dev_priv->info.display_mmio_offset + 0x72418)
+#define SWF32                  (dev_priv->info.display_mmio_offset + 0x7241c)
 
 /* Pipe B */
-#define _PIPEBDSL              (dev_priv->info->display_mmio_offset + 0x71000)
-#define _PIPEBCONF             (dev_priv->info->display_mmio_offset + 0x71008)
-#define _PIPEBSTAT             (dev_priv->info->display_mmio_offset + 0x71024)
+#define _PIPEBDSL              (dev_priv->info.display_mmio_offset + 0x71000)
+#define _PIPEBCONF             (dev_priv->info.display_mmio_offset + 0x71008)
+#define _PIPEBSTAT             (dev_priv->info.display_mmio_offset + 0x71024)
 #define _PIPEBFRAMEHIGH                0x71040
 #define _PIPEBFRAMEPIXEL       0x71044
-#define _PIPEB_FRMCOUNT_GM45   (dev_priv->info->display_mmio_offset + 0x71040)
-#define _PIPEB_FLIPCOUNT_GM45  (dev_priv->info->display_mmio_offset + 0x71044)
+#define _PIPEB_FRMCOUNT_GM45   (dev_priv->info.display_mmio_offset + 0x71040)
+#define _PIPEB_FLIPCOUNT_GM45  (dev_priv->info.display_mmio_offset + 0x71044)
 
 
 /* Display B control */
-#define _DSPBCNTR              (dev_priv->info->display_mmio_offset + 0x71180)
+#define _DSPBCNTR              (dev_priv->info.display_mmio_offset + 0x71180)
 #define   DISPPLANE_ALPHA_TRANS_ENABLE         (1<<15)
 #define   DISPPLANE_ALPHA_TRANS_DISABLE                0
 #define   DISPPLANE_SPRITE_ABOVE_DISPLAY       0
 #define   DISPPLANE_SPRITE_ABOVE_OVERLAY       (1)
-#define _DSPBADDR              (dev_priv->info->display_mmio_offset + 0x71184)
-#define _DSPBSTRIDE            (dev_priv->info->display_mmio_offset + 0x71188)
-#define _DSPBPOS               (dev_priv->info->display_mmio_offset + 0x7118C)
-#define _DSPBSIZE              (dev_priv->info->display_mmio_offset + 0x71190)
-#define _DSPBSURF              (dev_priv->info->display_mmio_offset + 0x7119C)
-#define _DSPBTILEOFF           (dev_priv->info->display_mmio_offset + 0x711A4)
-#define _DSPBOFFSET            (dev_priv->info->display_mmio_offset + 0x711A4)
-#define _DSPBSURFLIVE          (dev_priv->info->display_mmio_offset + 0x711AC)
+#define _DSPBADDR              (dev_priv->info.display_mmio_offset + 0x71184)
+#define _DSPBSTRIDE            (dev_priv->info.display_mmio_offset + 0x71188)
+#define _DSPBPOS               (dev_priv->info.display_mmio_offset + 0x7118C)
+#define _DSPBSIZE              (dev_priv->info.display_mmio_offset + 0x71190)
+#define _DSPBSURF              (dev_priv->info.display_mmio_offset + 0x7119C)
+#define _DSPBTILEOFF           (dev_priv->info.display_mmio_offset + 0x711A4)
+#define _DSPBOFFSET            (dev_priv->info.display_mmio_offset + 0x711A4)
+#define _DSPBSURFLIVE          (dev_priv->info.display_mmio_offset + 0x711AC)
 
 /* Sprite A control */
 #define _DVSACNTR              0x72180
 #define  FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK  0xff
 
 
-#define _PIPEA_DATA_M1           (dev_priv->info->display_mmio_offset + 0x60030)
+#define _PIPEA_DATA_M1         0x60030
 #define  PIPE_DATA_M1_OFFSET    0
-#define _PIPEA_DATA_N1           (dev_priv->info->display_mmio_offset + 0x60034)
+#define _PIPEA_DATA_N1         0x60034
 #define  PIPE_DATA_N1_OFFSET    0
 
-#define _PIPEA_DATA_M2           (dev_priv->info->display_mmio_offset + 0x60038)
+#define _PIPEA_DATA_M2         0x60038
 #define  PIPE_DATA_M2_OFFSET    0
-#define _PIPEA_DATA_N2           (dev_priv->info->display_mmio_offset + 0x6003c)
+#define _PIPEA_DATA_N2         0x6003c
 #define  PIPE_DATA_N2_OFFSET    0
 
-#define _PIPEA_LINK_M1           (dev_priv->info->display_mmio_offset + 0x60040)
+#define _PIPEA_LINK_M1         0x60040
 #define  PIPE_LINK_M1_OFFSET    0
-#define _PIPEA_LINK_N1           (dev_priv->info->display_mmio_offset + 0x60044)
+#define _PIPEA_LINK_N1         0x60044
 #define  PIPE_LINK_N1_OFFSET    0
 
-#define _PIPEA_LINK_M2           (dev_priv->info->display_mmio_offset + 0x60048)
+#define _PIPEA_LINK_M2         0x60048
 #define  PIPE_LINK_M2_OFFSET    0
-#define _PIPEA_LINK_N2           (dev_priv->info->display_mmio_offset + 0x6004c)
+#define _PIPEA_LINK_N2         0x6004c
 #define  PIPE_LINK_N2_OFFSET    0
 
 /* PIPEB timing regs are the same, starting from 0x61000 */
 
-#define _PIPEB_DATA_M1           (dev_priv->info->display_mmio_offset + 0x61030)
-#define _PIPEB_DATA_N1           (dev_priv->info->display_mmio_offset + 0x61034)
-
-#define _PIPEB_DATA_M2           (dev_priv->info->display_mmio_offset + 0x61038)
-#define _PIPEB_DATA_N2           (dev_priv->info->display_mmio_offset + 0x6103c)
-
-#define _PIPEB_LINK_M1           (dev_priv->info->display_mmio_offset + 0x61040)
-#define _PIPEB_LINK_N1           (dev_priv->info->display_mmio_offset + 0x61044)
-
-#define _PIPEB_LINK_M2           (dev_priv->info->display_mmio_offset + 0x61048)
-#define _PIPEB_LINK_N2           (dev_priv->info->display_mmio_offset + 0x6104c)
-
-#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define _PIPEB_DATA_M1         0x61030
+#define _PIPEB_DATA_N1         0x61034
+#define _PIPEB_DATA_M2         0x61038
+#define _PIPEB_DATA_N2         0x6103c
+#define _PIPEB_LINK_M1         0x61040
+#define _PIPEB_LINK_N1         0x61044
+#define _PIPEB_LINK_M2         0x61048
+#define _PIPEB_LINK_N2         0x6104c
+
+#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2)
 
 /* CPU panel fitter */
 /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
 #define  ILK_ELPIN_409_SELECT  (1 << 25)
 #define  ILK_DPARB_GATE        (1<<22)
 #define  ILK_VSDPFD_FULL       (1<<21)
-#define ILK_DISPLAY_CHICKEN_FUSES      0x42014
-#define  ILK_INTERNAL_GRAPHICS_DISABLE (1<<31)
-#define  ILK_INTERNAL_DISPLAY_DISABLE  (1<<30)
-#define  ILK_DISPLAY_DEBUG_DISABLE     (1<<29)
-#define  ILK_HDCP_DISABLE              (1<<25)
-#define  ILK_eDP_A_DISABLE             (1<<24)
-#define  ILK_DESKTOP                   (1<<23)
+#define FUSE_STRAP                     0x42014
+#define  ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
+#define  ILK_INTERNAL_DISPLAY_DISABLE  (1 << 30)
+#define  ILK_DISPLAY_DEBUG_DISABLE     (1 << 29)
+#define  ILK_HDCP_DISABLE              (1 << 25)
+#define  ILK_eDP_A_DISABLE             (1 << 24)
+#define  HSW_CDCLK_LIMIT               (1 << 24)
+#define  ILK_DESKTOP                   (1 << 23)
 
 #define ILK_DSPCLK_GATE_D                      0x42020
 #define   ILK_VRHUNIT_CLOCK_GATE_DISABLE       (1 << 28)
 
 #define _CHICKEN_PIPESL_1_A    0x420b0
 #define _CHICKEN_PIPESL_1_B    0x420b4
-#define  DPRS_MASK_VBLANK_SRD  (1 << 0)
+#define  HSW_FBCQ_DIS                  (1 << 22)
+#define  BDW_DPRS_MASK_VBLANK_SRD      (1 << 0)
 #define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
 #define DISP_ARB_CTL   0x45000
 #define GEN7_MSG_CTL   0x45010
 #define  WAIT_FOR_PCH_RESET_ACK                (1<<1)
 #define  WAIT_FOR_PCH_FLR_ACK          (1<<0)
+#define HSW_NDE_RSTWRN_OPT     0x46408
+#define  RESET_PCH_HANDSHAKE_ENABLE    (1<<4)
 
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1             0x7010
 #define COMMON_SLICE_CHICKEN2                  0x7014
 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE  (1<<0)
 
+#define GEN7_L3SQCREG1                         0xB010
+#define  VLV_B0_WA_L3SQCREG1_VALUE             0x00D30000
+
 #define GEN7_L3CNTLREG1                                0xB01C
-#define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C4FFF8C
+#define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C47FF8C
 #define  GEN7_L3AGDIS                          (1<<19)
 
 #define GEN7_L3_CHICKEN_MODE_REGISTER          0xB030
 #define HSW_SCRATCH1                           0xb038
 #define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE  (1<<27)
 
-#define HSW_FUSE_STRAP         0x42014
-#define  HSW_CDCLK_LIMIT       (1 << 24)
-
 /* PCH */
 
 /* south display engine interrupt: IBX */
 #define HSW_VIDEO_DIP_GCP_B            0x61210
 
 #define HSW_TVIDEO_DIP_CTL(trans) \
-        _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+        _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
 #define HSW_TVIDEO_DIP_AVI_DATA(trans) \
-        _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+        _TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A)
 #define HSW_TVIDEO_DIP_VS_DATA(trans) \
-        _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B)
+        _TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A)
 #define HSW_TVIDEO_DIP_SPD_DATA(trans) \
-        _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+        _TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A)
 #define HSW_TVIDEO_DIP_GCP(trans) \
-       _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+       _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
 #define HSW_TVIDEO_DIP_VSC_DATA(trans) \
-        _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
+        _TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A)
 
 #define HSW_STEREO_3D_CTL_A    0x70020
 #define   S3D_ENABLE           (1<<31)
 #define HSW_STEREO_3D_CTL_B    0x71020
 
 #define HSW_STEREO_3D_CTL(trans) \
-       _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
+       _PIPE2(trans, HSW_STEREO_3D_CTL_A)
 
 #define _PCH_TRANS_HTOTAL_B          0xe1000
 #define _PCH_TRANS_HBLANK_B          0xe1004
 #define GEN7_UCGCTL4                           0x940c
 #define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE      (1<<25)
 
+#define GEN8_UCGCTL6                           0x9430
+#define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE      (1<<14)
+
 #define GEN6_RPNSWREQ                          0xA008
 #define   GEN6_TURBO_DISABLE                   (1<<31)
 #define   GEN6_FREQUENCY(x)                    ((x)<<25)
                                                 GEN6_PM_RP_DOWN_THRESHOLD | \
                                                 GEN6_PM_RP_DOWN_TIMEOUT)
 
+#define VLV_GTLC_SURVIVABILITY_REG              0x130098
+#define VLV_GFX_CLK_STATUS_BIT                 (1<<3)
+#define VLV_GFX_CLK_FORCE_ON_BIT               (1<<2)
+
 #define GEN6_GT_GFX_RC6_LOCKED                 0x138104
 #define VLV_COUNTER_CONTROL                    0x138104
 #define   VLV_COUNT_RANGE_HIGH                 (1<<15)
 #define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE  (1<<10)
 #define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
 
+#define GEN8_ROW_CHICKEN               0xe4f0
+#define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE        (1<<8)
+#define   STALL_DOP_GATING_DISABLE             (1<<5)
+
 #define GEN7_ROW_CHICKEN2              0xe4f4
 #define GEN7_ROW_CHICKEN2_GT2          0xf4f4
 #define   DOP_CLOCK_GATING_DISABLE     (1<<0)
 #define   GEN8_CENTROID_PIXEL_OPT_DIS  (1<<8)
 #define   GEN8_SAMPLER_POWER_BYPASS_DIS        (1<<1)
 
-#define G4X_AUD_VID_DID                        (dev_priv->info->display_mmio_offset + 0x62020)
+#define G4X_AUD_VID_DID                        (dev_priv->info.display_mmio_offset + 0x62020)
 #define INTEL_AUDIO_DEVCL              0x808629FB
 #define INTEL_AUDIO_DEVBLC             0x80862801
 #define INTEL_AUDIO_DEVCTG             0x80862802
 #define TRANS_DDI_FUNC_CTL_B           0x61400
 #define TRANS_DDI_FUNC_CTL_C           0x62400
 #define TRANS_DDI_FUNC_CTL_EDP         0x6F400
-#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
-                                                  TRANS_DDI_FUNC_CTL_B)
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A)
+
 #define  TRANS_DDI_FUNC_ENABLE         (1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
 #define  TRANS_DDI_PORT_MASK           (7<<28)
 #define  SPLL_PLL_ENABLE               (1<<31)
 #define  SPLL_PLL_SSC                  (1<<28)
 #define  SPLL_PLL_NON_SSC              (2<<28)
+#define  SPLL_PLL_LCPLL                        (3<<28)
+#define  SPLL_PLL_REF_MASK             (3<<28)
 #define  SPLL_PLL_FREQ_810MHz          (0<<26)
 #define  SPLL_PLL_FREQ_1350MHz         (1<<26)
+#define  SPLL_PLL_FREQ_2700MHz         (2<<26)
+#define  SPLL_PLL_FREQ_MASK            (3<<26)
 
 /* WRPLL */
 #define WRPLL_CTL1                     0x46040
 #define  WRPLL_PLL_SELECT_LCPLL_2700   (0x03<<28)
 /* WRPLL divider programming */
 #define  WRPLL_DIVIDER_REFERENCE(x)    ((x)<<0)
+#define  WRPLL_DIVIDER_REF_MASK                (0xff)
 #define  WRPLL_DIVIDER_POST(x)         ((x)<<8)
+#define  WRPLL_DIVIDER_POST_MASK       (0x3f<<8)
+#define  WRPLL_DIVIDER_POST_SHIFT      8
 #define  WRPLL_DIVIDER_FEEDBACK(x)     ((x)<<16)
+#define  WRPLL_DIVIDER_FB_SHIFT                16
+#define  WRPLL_DIVIDER_FB_MASK         (0xff<<16)
 
 /* Port clock selection */
 #define PORT_CLK_SEL_A                 0x46100
 #define  PORT_CLK_SEL_WRPLL1           (4<<29)
 #define  PORT_CLK_SEL_WRPLL2           (5<<29)
 #define  PORT_CLK_SEL_NONE             (7<<29)
+#define  PORT_CLK_SEL_MASK             (7<<29)
 
 /* Transcoder clock selection */
 #define TRANS_CLK_SEL_A                        0x46140
 #define  TRANS_CLK_SEL_DISABLED                (0x0<<29)
 #define  TRANS_CLK_SEL_PORT(x)         ((x+1)<<29)
 
-#define _TRANSA_MSA_MISC               0x60410
-#define _TRANSB_MSA_MISC               0x61410
-#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
-                                              _TRANSB_MSA_MISC)
+#define TRANSA_MSA_MISC                        0x60410
+#define TRANSB_MSA_MISC                        0x61410
+#define TRANSC_MSA_MISC                        0x62410
+#define TRANS_EDP_MSA_MISC             0x6f410
+#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC)
+
 #define  TRANS_MSA_SYNC_CLK            (1<<0)
 #define  TRANS_MSA_6_BPC               (0<<5)
 #define  TRANS_MSA_8_BPC               (1<<5)
 
 /* SFUSE_STRAP */
 #define SFUSE_STRAP                    0xc2014
+#define  SFUSE_STRAP_FUSE_LOCK         (1<<13)
+#define  SFUSE_STRAP_DISPLAY_DISABLED  (1<<7)
 #define  SFUSE_STRAP_DDIB_DETECTED     (1<<2)
 #define  SFUSE_STRAP_DDIC_DETECTED     (1<<1)
 #define  SFUSE_STRAP_DDID_DETECTED     (1<<0)
 #define MIPI_READ_DATA_VALID(pipe)     _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
 #define  READ_DATA_VALID(n)                            (1 << (n))
 
+/* For UMS only (deprecated): */
+#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
+#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+
 #endif /* _I915_REG_H_ */
index 8150fdc08d497122c49959b9bbfaf0090259c7df..56785e8fb2eb5f7e209b8713f3815b4c5ecbcf22 100644 (file)
@@ -236,19 +236,9 @@ static void i915_save_display(struct drm_device *dev)
                dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
        }
 
-       /* Only regfile.save FBC state on the platform that supports FBC */
-       if (HAS_FBC(dev)) {
-               if (HAS_PCH_SPLIT(dev)) {
-                       dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
-               } else if (IS_GM45(dev)) {
-                       dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
-               } else {
-                       dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-                       dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-                       dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-                       dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
-               }
-       }
+       /* save FBC interval */
+       if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+               dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_save_vga(dev);
@@ -300,18 +290,10 @@ static void i915_restore_display(struct drm_device *dev)
 
        /* only restore FBC info on the platforms that support FBC */
        intel_disable_fbc(dev);
-       if (HAS_FBC(dev)) {
-               if (HAS_PCH_SPLIT(dev)) {
-                       I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
-               } else if (IS_GM45(dev)) {
-                       I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
-               } else {
-                       I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
-                       I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
-                       I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
-                       I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
-               }
-       }
+
+       /* restore FBC interval */
+       if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+               I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_restore_vga(dev);
@@ -324,10 +306,6 @@ int i915_save_state(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       if (INTEL_INFO(dev)->gen <= 4)
-               pci_read_config_byte(dev->pdev, LBB,
-                                    &dev_priv->regfile.saveLBB);
-
        mutex_lock(&dev->struct_mutex);
 
        i915_save_display(dev);
@@ -377,10 +355,6 @@ int i915_restore_state(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       if (INTEL_INFO(dev)->gen <= 4)
-               pci_write_config_byte(dev->pdev, LBB,
-                                     dev_priv->regfile.saveLBB);
-
        mutex_lock(&dev->struct_mutex);
 
        i915_gem_restore_fences(dev);
index 33bcae314bf86ea5f7a2deafb80625d0382634af..0c741f4eefb04b1fcfbab17e55957dc5324f61df 100644 (file)
@@ -357,6 +357,11 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                else
                        gen6_set_rps(dev, val);
        }
+       else if (!IS_VALLEYVIEW(dev))
+               /* We still need gen6_set_rps to process the new max_delay
+                  and update the interrupt limits even though the
+                  frequency request is unchanged. */
+               gen6_set_rps(dev, dev_priv->rps.cur_delay);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -426,6 +431,11 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                else
                        gen6_set_rps(dev, val);
        }
+       else if (!IS_VALLEYVIEW(dev))
+               /* We still need gen6_set_rps to process the new min_delay
+                  and update the interrupt limits even though the
+                  frequency request is unchanged. */
+               gen6_set_rps(dev, dev_priv->rps.cur_delay);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
index 6e580c98dede727175cf0542b0a1024ad0e2a5da..b95a380958dbe8b55c275cd8b047f92f5ebbe81b 100644 (file)
@@ -34,15 +34,15 @@ TRACE_EVENT(i915_gem_object_create,
 );
 
 TRACE_EVENT(i915_vma_bind,
-           TP_PROTO(struct i915_vma *vma, bool mappable),
-           TP_ARGS(vma, mappable),
+           TP_PROTO(struct i915_vma *vma, unsigned flags),
+           TP_ARGS(vma, flags),
 
            TP_STRUCT__entry(
                             __field(struct drm_i915_gem_object *, obj)
                             __field(struct i915_address_space *, vm)
                             __field(u32, offset)
                             __field(u32, size)
-                            __field(bool, mappable)
+                            __field(unsigned, flags)
                             ),
 
            TP_fast_assign(
@@ -50,12 +50,12 @@ TRACE_EVENT(i915_vma_bind,
                           __entry->vm = vma->vm;
                           __entry->offset = vma->node.start;
                           __entry->size = vma->node.size;
-                          __entry->mappable = mappable;
+                          __entry->flags = flags;
                           ),
 
            TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
                      __entry->obj, __entry->offset, __entry->size,
-                     __entry->mappable ? ", mappable" : "",
+                     __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
                      __entry->vm)
 );
 
@@ -196,26 +196,26 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 );
 
 TRACE_EVENT(i915_gem_evict,
-           TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
-           TP_ARGS(dev, size, align, mappable),
+           TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
+           TP_ARGS(dev, size, align, flags),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, size)
                             __field(u32, align)
-                            __field(bool, mappable)
+                            __field(unsigned, flags)
                            ),
 
            TP_fast_assign(
                           __entry->dev = dev->primary->index;
                           __entry->size = size;
                           __entry->align = align;
-                          __entry->mappable = mappable;
+                          __entry->flags = flags;
                          ),
 
            TP_printk("dev=%d, size=%d, align=%d %s",
                      __entry->dev, __entry->size, __entry->align,
-                     __entry->mappable ? ", mappable" : "")
+                     __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );
 
 TRACE_EVENT(i915_gem_evict_everything,
index caa18e855815eaf04a39ecb6eedcc572cf4d6290..480da593e6c036f68c20a827c7f499377dccaf27 100644 (file)
@@ -271,6 +271,10 @@ void i915_save_display_reg(struct drm_device *dev)
        /* FIXME: save TV & SDVO state in regfile */
 
        /* Backlight */
+       if (INTEL_INFO(dev)->gen <= 4)
+               pci_read_config_byte(dev->pdev, PCI_LBPC,
+                                    &dev_priv->regfile.saveLBB);
+
        if (HAS_PCH_SPLIT(dev)) {
                dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
                dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -293,6 +297,10 @@ void i915_restore_display_reg(struct drm_device *dev)
        int i;
 
        /* Backlight */
+       if (INTEL_INFO(dev)->gen <= 4)
+               pci_write_config_byte(dev->pdev, PCI_LBPC,
+                                     dev_priv->regfile.saveLBB);
+
        if (HAS_PCH_SPLIT(dev)) {
                I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
                I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
index f22041973f3a0a426e29424cb60ba68e042c3ed8..4867f4cc0938cba2db4a33bfdb464712defa5e9c 100644 (file)
@@ -259,7 +259,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
                        downclock = dvo_timing->clock;
        }
 
-       if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
+       if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
                dev_priv->lvds_downclock_avail = 1;
                dev_priv->lvds_downclock = downclock * 10;
                DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
@@ -318,7 +318,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
        struct drm_display_mode *panel_fixed_mode;
        int index;
 
-       index = i915_vbt_sdvo_panel_type;
+       index = i915.vbt_sdvo_panel_type;
        if (index == -2) {
                DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
                return;
@@ -599,14 +599,14 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 {
        struct bdb_mipi *mipi;
 
-       mipi = find_section(bdb, BDB_MIPI);
+       mipi = find_section(bdb, BDB_MIPI_CONFIG);
        if (!mipi) {
                DRM_DEBUG_KMS("No MIPI BDB found");
                return;
        }
 
        /* XXX: add more info */
-       dev_priv->vbt.dsi.panel_id = mipi->panel_id;
+       dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
 }
 
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
index 282de5e9f39dee6566bb6d220f378864df86e8b9..83b7629e4367bdac9a992f1c63d684074c8ef10f 100644 (file)
@@ -104,7 +104,8 @@ struct vbios_data {
 #define BDB_LVDS_LFP_DATA       42
 #define BDB_LVDS_BACKLIGHT      43
 #define BDB_LVDS_POWER          44
-#define BDB_MIPI                50
+#define BDB_MIPI_CONFIG                 52
+#define BDB_MIPI_SEQUENCE       53
 #define BDB_SKIP               254 /* VBIOS private block, ignore */
 
 struct bdb_general_features {
@@ -711,44 +712,159 @@ int intel_parse_bios(struct drm_device *dev);
 #define DVO_PORT_DPD   9
 #define DVO_PORT_DPA   10
 
-/* MIPI DSI panel info */
-struct bdb_mipi {
-       u16 panel_id;
-       u16 bridge_revision;
-
-       /* General params */
-       u32 dithering:1;
-       u32 bpp_pixel_format:1;
-       u32 rsvd1:1;
-       u32 dphy_valid:1;
-       u32 resvd2:28;
+/* Block 52 contains MIPI panel info.
+ * 6 such entries are present; the index into the correct
+ * entry is based on the panel_index in block #40 (LFP).
+ */
+#define MAX_MIPI_CONFIGURATIONS        6
 
-       u16 port_info;
-       u16 rsvd3:2;
-       u16 num_lanes:2;
-       u16 rsvd4:12;
+#define MIPI_DSI_UNDEFINED_PANEL_ID    0
+#define MIPI_DSI_GENERIC_PANEL_ID      1
 
-       /* DSI config */
-       u16 virt_ch_num:2;
-       u16 vtm:2;
-       u16 rsvd5:12;
+struct mipi_config {
+       u16 panel_id;
 
-       u32 dsi_clock;
+       /* General Params */
+       u32 enable_dithering:1;
+       u32 rsvd1:1;
+       u32 is_bridge:1;
+
+       u32 panel_arch_type:2;
+       u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE   0x1
+#define NON_BURST_SYNC_EVENTS  0x2
+#define BURST_MODE             0x3
+       u32 video_transfer_mode:2;
+
+       u32 cabc_supported:1;
+       u32 pwm_blc:1;
+
+       /* Bit 13:10 */
+#define PIXEL_FORMAT_RGB565                    0x1
+#define PIXEL_FORMAT_RGB666                    0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED     0x3
+#define PIXEL_FORMAT_RGB888                    0x4
+       u32 videomode_color_format:4;
+
+       /* Bit 15:14 */
+#define ENABLE_ROTATION_0      0x0
+#define ENABLE_ROTATION_90     0x1
+#define ENABLE_ROTATION_180    0x2
+#define ENABLE_ROTATION_270    0x3
+       u32 rotation:2;
+       u32 bta_enabled:1;
+       u32 rsvd2:15;
+
+       /* 2 byte Port Description */
+#define DUAL_LINK_NOT_SUPPORTED        0
+#define DUAL_LINK_FRONT_BACK   1
+#define DUAL_LINK_PIXEL_ALT    2
+       u16 dual_link:2;
+       u16 lane_cnt:2;
+       u16 rsvd3:12;
+
+       u16 rsvd4;
+
+       u8 rsvd5[5];
+       u32 dsi_ddr_clk;
        u32 bridge_ref_clk;
-       u16 rsvd_pwr;
 
-       /* Dphy Params */
-       u32 prepare_cnt:5;
-       u32 rsvd6:3;
+#define  BYTE_CLK_SEL_20MHZ            0
+#define  BYTE_CLK_SEL_10MHZ            1
+#define  BYTE_CLK_SEL_5MHZ             2
+       u8 byte_clk_sel:2;
+
+       u8 rsvd6:6;
+
+       /* DPHY Flags */
+       u16 dphy_param_valid:1;
+       u16 eot_pkt_disabled:1;
+       u16 enable_clk_stop:1;
+       u16 rsvd7:13;
+
+       u32 hs_tx_timeout;
+       u32 lp_rx_timeout;
+       u32 turn_around_timeout;
+       u32 device_reset_timer;
+       u32 master_init_timer;
+       u32 dbi_bw_timer;
+       u32 lp_byte_clk_val;
+
+       /*  4 byte Dphy Params */
+       u32 prepare_cnt:6;
+       u32 rsvd8:2;
        u32 clk_zero_cnt:8;
        u32 trail_cnt:5;
-       u32 rsvd7:3;
+       u32 rsvd9:3;
        u32 exit_zero_cnt:6;
-       u32 rsvd8:2;
+       u32 rsvd10:2;
 
-       u32 hl_switch_cnt;
-       u32 lp_byte_clk;
        u32 clk_lane_switch_cnt;
+       u32 hl_switch_cnt;
+
+       u32 rsvd11[6];
+
+       /* timings based on dphy spec */
+       u8 tclk_miss;
+       u8 tclk_post;
+       u8 rsvd12;
+       u8 tclk_pre;
+       u8 tclk_prepare;
+       u8 tclk_settle;
+       u8 tclk_term_enable;
+       u8 tclk_trail;
+       u16 tclk_prepare_clkzero;
+       u8 rsvd13;
+       u8 td_term_enable;
+       u8 teot;
+       u8 ths_exit;
+       u8 ths_prepare;
+       u16 ths_prepare_hszero;
+       u8 rsvd14;
+       u8 ths_settle;
+       u8 ths_skip;
+       u8 ths_trail;
+       u8 tinit;
+       u8 tlpx;
+       u8 rsvd15[3];
+
+       /* GPIOs */
+       u8 panel_enable;
+       u8 bl_enable;
+       u8 pwm_enable;
+       u8 reset_r_n;
+       u8 pwr_down_r;
+       u8 stdby_r_n;
+
 } __packed;
 
+/* Block 52 contains the MIPI configuration block:
+ * 6 * mipi_config entries, followed by the 6 pps data
+ * blocks below.
+ *
+ * All delays have a unit of 100us.
+ */
+struct mipi_pps_data {
+       u16 panel_on_delay;
+       u16 bl_enable_delay;
+       u16 bl_disable_delay;
+       u16 panel_off_delay;
+       u16 panel_power_cycle_delay;
+};
+
+struct bdb_mipi_config {
+       struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
+       struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+};
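
A minimal sketch of the lookup the comments above imply; treating panel_index as coming from the block #40 LFP data is an assumption, as is the helper itself:

	/* Hypothetical: pick the per-panel config/PPS pair out of block 52. */
	static const struct mipi_config *
	pick_mipi_config(const struct bdb_mipi_config *blk, unsigned int panel_index)
	{
		if (panel_index >= MAX_MIPI_CONFIGURATIONS)
			return NULL;
		return &blk->config[panel_index];  /* pps is blk->pps[panel_index] */
	}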
+
+/* Block 53 contains the MIPI sequences needed by the panel
+ * for enabling it. This block is variable in size and
+ * can contain a maximum of 6 blocks.
+ */
+struct bdb_mipi_sequence {
+       u8 version;
+       u8 data[0];
+};
+
 #endif /* _I830_BIOS_H_ */
index e2e39e65f10954b8076b3ed01b565f0cb173bb0d..32b7d49306f8c74e0944f9c3d1b74a4699faffeb 100644 (file)
@@ -68,8 +68,13 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
+       enum intel_display_power_domain power_domain;
        u32 tmp;
 
+       power_domain = intel_display_port_power_domain(encoder);
+       if (!intel_display_power_enabled(dev_priv, power_domain))
+               return false;
+
        tmp = I915_READ(crt->adpa_reg);
 
        if (!(tmp & ADPA_DAC_ENABLE))
@@ -262,6 +267,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
        if (HAS_PCH_LPT(dev))
                pipe_config->pipe_bpp = 24;
 
+       /* FDI must always be 2.7 GHz */
+       if (HAS_DDI(dev))
+               pipe_config->port_clock = 135000 * 2;
+
        return true;
 }
 
@@ -630,14 +639,22 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crt *crt = intel_attached_crt(connector);
+       struct intel_encoder *intel_encoder = &crt->base;
+       enum intel_display_power_domain power_domain;
        enum drm_connector_status status;
        struct intel_load_detect_pipe tmp;
 
+       intel_runtime_pm_get(dev_priv);
+
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
                      connector->base.id, drm_get_connector_name(connector),
                      force);
 
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
        if (I915_HAS_HOTPLUG(dev)) {
                /* We cannot rely on the HPD pin always being correctly wired
                 * up; for example, many KVMs do not pass it through, and so
                 * only trust an assertion that the monitor is connected.
                 */
                 */
                if (intel_crt_detect_hotplug(connector)) {
                        DRM_DEBUG_KMS("CRT detected via hotplug\n");
-                       return connector_status_connected;
+                       status = connector_status_connected;
+                       goto out;
                } else
                        DRM_DEBUG_KMS("CRT not detected via hotplug\n");
        }
 
-       if (intel_crt_detect_ddc(connector))
-               return connector_status_connected;
+       if (intel_crt_detect_ddc(connector)) {
+               status = connector_status_connected;
+               goto out;
+       }
 
        /* Load detection is broken on HPD capable machines. Whoever wants a
         * broken monitor (without edid) to work behind a broken kvm (that fails
         * to have the right resistors for HP detection) needs to fix this up.
         * For now just bail out. */
-       if (I915_HAS_HOTPLUG(dev))
-               return connector_status_disconnected;
+       if (I915_HAS_HOTPLUG(dev)) {
+               status = connector_status_disconnected;
+               goto out;
+       }
 
-       if (!force)
-               return connector->status;
+       if (!force) {
+               status = connector->status;
+               goto out;
+       }
 
        /* for pre-945g platforms use load detect */
        if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
@@ -673,6 +697,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
        } else
                status = connector_status_unknown;
 
+out:
+       intel_display_power_put(dev_priv, power_domain);
+       intel_runtime_pm_put(dev_priv);
+
        return status;
 }
 
@@ -686,17 +714,28 @@ static int intel_crt_get_modes(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crt *crt = intel_attached_crt(connector);
+       struct intel_encoder *intel_encoder = &crt->base;
+       enum intel_display_power_domain power_domain;
        int ret;
        struct i2c_adapter *i2c;
 
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
        i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
        ret = intel_crt_ddc_get_modes(connector, i2c);
        if (ret || !IS_G4X(dev))
-               return ret;
+               goto out;
 
        /* Try to probe digital port for output in DVI-I -> VGA mode. */
        i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
-       return intel_crt_ddc_get_modes(connector, i2c);
+       ret = intel_crt_ddc_get_modes(connector, i2c);
+
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 static int intel_crt_set_property(struct drm_connector *connector,
@@ -800,7 +839,7 @@ void intel_crt_init(struct drm_device *dev)
        intel_connector_attach_encoder(intel_connector, &crt->base);
 
        crt->base.type = INTEL_OUTPUT_ANALOG;
-       crt->base.cloneable = true;
+       crt->base.cloneable = 1 << INTEL_OUTPUT_DVO;
        if (IS_I830(dev))
                crt->base.crtc_mask = (1 << 0);
        else
@@ -833,6 +872,7 @@ void intel_crt_init(struct drm_device *dev)
                crt->base.get_hw_state = intel_crt_get_hw_state;
        }
        intel_connector->get_hw_state = intel_connector_get_hw_state;
+       intel_connector->unregister = intel_connector_unregister;
 
        drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
@@ -857,4 +897,6 @@ void intel_crt_init(struct drm_device *dev)
 
                dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
        }
+
+       intel_crt_reset(connector);
 }
index e06b9e017d6ba918b87557de301df47076852434..3565d61531f0281cf4e32a0f5e02a90b07e6f13a 100644 (file)
@@ -633,6 +633,97 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
        /* Otherwise a < c && b >= d, do nothing */
 }
 
+static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
+                                    int reg)
+{
+       int refclk = LC_FREQ;
+       int n, p, r;
+       u32 wrpll;
+
+       wrpll = I915_READ(reg);
+       switch (wrpll & SPLL_PLL_REF_MASK) {
+       case SPLL_PLL_SSC:
+       case SPLL_PLL_NON_SSC:
+               /*
+                * We could calculate spread here, but our checking
+                * code only cares about 5% accuracy, and spread is a max of
+                * 0.5% downspread.
+                */
+               refclk = 135;
+               break;
+       case SPLL_PLL_LCPLL:
+               refclk = LC_FREQ;
+               break;
+       default:
+               WARN(1, "bad wrpll refclk\n");
+               return 0;
+       }
+
+       r = wrpll & WRPLL_DIVIDER_REF_MASK;
+       p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+       n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+       /* Convert to KHz, p & r have a fixed point portion */
+       return (refclk * n * 100) / (p * r);
+}
+
+static void intel_ddi_clock_get(struct intel_encoder *encoder,
+                               struct intel_crtc_config *pipe_config)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       enum port port = intel_ddi_get_encoder_port(encoder);
+       int link_clock = 0;
+       u32 val, pll;
+
+       val = I915_READ(PORT_CLK_SEL(port));
+       switch (val & PORT_CLK_SEL_MASK) {
+       case PORT_CLK_SEL_LCPLL_810:
+               link_clock = 81000;
+               break;
+       case PORT_CLK_SEL_LCPLL_1350:
+               link_clock = 135000;
+               break;
+       case PORT_CLK_SEL_LCPLL_2700:
+               link_clock = 270000;
+               break;
+       case PORT_CLK_SEL_WRPLL1:
+               link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+               break;
+       case PORT_CLK_SEL_WRPLL2:
+               link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+               break;
+       case PORT_CLK_SEL_SPLL:
+               pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
+               if (pll == SPLL_PLL_FREQ_810MHz)
+                       link_clock = 81000;
+               else if (pll == SPLL_PLL_FREQ_1350MHz)
+                       link_clock = 135000;
+               else if (pll == SPLL_PLL_FREQ_2700MHz)
+                       link_clock = 270000;
+               else {
+                       WARN(1, "bad spll freq\n");
+                       return;
+               }
+               break;
+       default:
+               WARN(1, "bad port clock sel\n");
+               return;
+       }
+
+       pipe_config->port_clock = link_clock * 2;
+
+       if (pipe_config->has_pch_encoder)
+               pipe_config->adjusted_mode.crtc_clock =
+                       intel_dotclock_calculate(pipe_config->port_clock,
+                                                &pipe_config->fdi_m_n);
+       else if (pipe_config->has_dp_encoder)
+               pipe_config->adjusted_mode.crtc_clock =
+                       intel_dotclock_calculate(pipe_config->port_clock,
+                                                &pipe_config->dp_m_n);
+       else
+               pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
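
Tying the readback path to the CRT change earlier in this diff: when PORT_CLK_SEL reads back as PORT_CLK_SEL_LCPLL_1350, link_clock is 135000 kHz and port_clock comes out as 135000 * 2 = 270000 kHz, exactly the value intel_crt_compute_config now forces for FDI ("FDI must always be 2.7 GHz").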
+
 static void
 intel_ddi_calculate_wrpll(int clock /* in Hz */,
                          unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
@@ -1054,9 +1145,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_ddi_get_encoder_port(encoder);
+       enum intel_display_power_domain power_domain;
        u32 tmp;
        int i;
 
+       power_domain = intel_display_port_power_domain(encoder);
+       if (!intel_display_power_enabled(dev_priv, power_domain))
+               return false;
+
        tmp = I915_READ(DDI_BUF_CTL(port));
 
        if (!(tmp & DDI_BUF_CTL_ENABLE))
@@ -1200,7 +1296,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 
        if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-               ironlake_edp_panel_on(intel_dp);
+               intel_edp_panel_on(intel_dp);
        }
 
        WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
@@ -1244,7 +1340,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
-               ironlake_edp_panel_off(intel_dp);
+               intel_edp_panel_off(intel_dp);
        }
 
        I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
@@ -1279,7 +1375,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
                if (port == PORT_A)
                        intel_dp_stop_link_train(intel_dp);
 
-               ironlake_edp_backlight_on(intel_dp);
+               intel_edp_backlight_on(intel_dp);
                intel_edp_psr_enable(intel_dp);
        }
 
@@ -1312,7 +1408,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
                intel_edp_psr_disable(intel_dp);
-               ironlake_edp_backlight_off(intel_dp);
+               intel_edp_backlight_off(intel_dp);
        }
 }
 
@@ -1324,7 +1420,7 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 
        if (lcpll & LCPLL_CD_SOURCE_FCLK) {
                return 800000;
-       } else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) {
+       } else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) {
                return 450000;
        } else if (freq == LCPLL_CLK_FREQ_450) {
                return 450000;
@@ -1509,6 +1605,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
+
+       intel_ddi_clock_get(encoder, pipe_config);
 }
 
 static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -1619,7 +1717,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 
        intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
        intel_encoder->crtc_mask =  (1 << 0) | (1 << 1) | (1 << 2);
-       intel_encoder->cloneable = false;
+       intel_encoder->cloneable = 0;
        intel_encoder->hot_plug = intel_ddi_hot_plug;
 
        if (init_dp)
index 9b8a7c7ea7fc1925610b4dcb11ca9f768057fd41..e71dda5feab2775e7e55e53e759f7dc05a71f986 100644 (file)
@@ -51,7 +51,10 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 
 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
                          int x, int y, struct drm_framebuffer *old_fb);
-
+static int intel_framebuffer_init(struct drm_device *dev,
+                                 struct intel_framebuffer *ifb,
+                                 struct drm_mode_fb_cmd2 *mode_cmd,
+                                 struct drm_i915_gem_object *obj);
 
 typedef struct {
        int     min, max;
@@ -1030,7 +1033,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
        u32 val;
 
        /* ILK FDI PLL is always enabled */
-       if (dev_priv->info->gen == 5)
+       if (INTEL_INFO(dev_priv->dev)->gen == 5)
                return;
 
        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1119,7 +1122,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
        if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
                state = true;
 
-       if (!intel_display_power_enabled(dev_priv->dev,
+       if (!intel_display_power_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
                cur_state = false;
        } else {
@@ -1185,16 +1188,16 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
 {
        struct drm_device *dev = dev_priv->dev;
-       int reg, i;
+       int reg, sprite;
        u32 val;
 
        if (IS_VALLEYVIEW(dev)) {
-               for (i = 0; i < dev_priv->num_plane; i++) {
-                       reg = SPCNTR(pipe, i);
+               for_each_sprite(pipe, sprite) {
+                       reg = SPCNTR(pipe, sprite);
                        val = I915_READ(reg);
                        WARN((val & SP_ENABLE),
                             "sprite %c assertion failure, should be off on pipe %c but is still active\n",
-                            sprite_name(pipe, i), pipe_name(pipe));
+                            sprite_name(pipe, sprite), pipe_name(pipe));
                }
        } else if (INTEL_INFO(dev)->gen >= 7) {
                reg = SPRCTL(pipe);
@@ -1443,7 +1446,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
        assert_pipe_disabled(dev_priv, crtc->pipe);
 
        /* No really, not for ILK+ */
-       BUG_ON(dev_priv->info->gen >= 5);
+       BUG_ON(INTEL_INFO(dev)->gen >= 5);
 
        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev) && !IS_I830(dev))
@@ -1549,11 +1552,12 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
  */
 static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
        /* PCH PLLs only available on ILK, SNB and IVB */
-       BUG_ON(dev_priv->info->gen < 5);
+       BUG_ON(INTEL_INFO(dev)->gen < 5);
        if (WARN_ON(pll == NULL))
                return;
 
@@ -1578,11 +1582,12 @@ static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
 
 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
        /* PCH only available on ILK+ */
-       BUG_ON(dev_priv->info->gen < 5);
+       BUG_ON(INTEL_INFO(dev)->gen < 5);
        if (WARN_ON(pll == NULL))
               return;
 
@@ -1617,7 +1622,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
        uint32_t reg, val, pipeconf_val;
 
        /* PCH only available on ILK+ */
-       BUG_ON(dev_priv->info->gen < 5);
+       BUG_ON(INTEL_INFO(dev)->gen < 5);
 
        /* Make sure PCH DPLL is enabled */
        assert_shared_dpll_enabled(dev_priv,
@@ -1670,7 +1675,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
        u32 val, pipeconf_val;
 
        /* PCH only available on ILK+ */
-       BUG_ON(dev_priv->info->gen < 5);
+       BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
 
        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
@@ -1744,21 +1749,16 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 
 /**
  * intel_enable_pipe - enable a pipe, asserting requirements
- * @dev_priv: i915 private structure
- * @pipe: pipe to enable
- * @pch_port: on ILK+, is this pipe driving a PCH port or not
+ * @crtc: crtc responsible for the pipe
  *
- * Enable @pipe, making sure that various hardware specific requirements
+ * Enable @crtc's pipe, making sure that various hardware specific requirements
  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
- *
- * @pipe should be %PIPE_A or %PIPE_B.
- *
- * Will wait until the pipe is actually running (i.e. first vblank) before
- * returning.
  */
-static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
-                             bool pch_port, bool dsi)
+static void intel_enable_pipe(struct intel_crtc *crtc)
 {
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum pipe pch_transcoder;
@@ -1780,12 +1780,12 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
         * need the check.
         */
        if (!HAS_PCH_SPLIT(dev_priv->dev))
-               if (dsi)
+               if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
        else {
-               if (pch_port) {
+               if (crtc->config.has_pch_encoder) {
                        /* if driving the PCH, we need FDI enabled */
                        assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
                        assert_fdi_tx_pll_enabled(dev_priv,
@@ -1796,11 +1796,24 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 
        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
-       if (val & PIPECONF_ENABLE)
+       if (val & PIPECONF_ENABLE) {
+               WARN_ON(!(pipe == PIPE_A &&
+                         dev_priv->quirks & QUIRK_PIPEA_FORCE));
                return;
+       }
 
        I915_WRITE(reg, val | PIPECONF_ENABLE);
-       intel_wait_for_vblank(dev_priv->dev, pipe);
+       POSTING_READ(reg);
+
+       /*
+        * There's no guarantee the pipe will really start running now. It
+        * depends on the Gen, the output type and the relative order between
+        * pipe and plane enabling. Avoid waiting on HSW+ since it's not
+        * necessary.
+        * TODO: audit the previous gens.
+        */
+       if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+               intel_wait_for_vblank(dev_priv->dev, pipe);
 }
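The rewritten enable path follows the PIPECONF write with a POSTING_READ() so the MMIO write is flushed before any subsequent wait. A hedged sketch of the idiom (not the driver's exact macro definition):

/* posting-read sketch: reading the just-written register back forces
 * the write out of any posted-write buffers before we continue */
#define POSTING_READ_SKETCH(reg) ((void)I915_READ(reg))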
 
 /**
@@ -1851,7 +1864,8 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
                               enum plane plane)
 {
-       u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
+       struct drm_device *dev = dev_priv->dev;
+       u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
 
        I915_WRITE(reg, I915_READ(reg));
        POSTING_READ(reg);
@@ -1929,6 +1943,14 @@ static bool need_vtd_wa(struct drm_device *dev)
        return false;
 }
 
+static int intel_align_height(struct drm_device *dev, int height, bool tiled)
+{
+       int tile_height;
+
+       tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
+       return ALIGN(height, tile_height);
+}
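A usage sketch for the new helper, with assumed values: X-tiled surfaces use 8-row tiles here (16 rows on gen2), so a 601-row tiled framebuffer rounds up while a linear one keeps its height:

/* sketch, assumed values: gen4 device, 601 visible rows */
int h_tiled  = intel_align_height(dev, 601, true);  /* ALIGN(601, 8) == 608 */
int h_linear = intel_align_height(dev, 601, false); /* ALIGN(601, 1) == 601 */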
+
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
                           struct drm_i915_gem_object *obj,
@@ -2025,6 +2047,111 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
        }
 }
 
+int intel_format_to_fourcc(int format)
+{
+       switch (format) {
+       case DISPPLANE_8BPP:
+               return DRM_FORMAT_C8;
+       case DISPPLANE_BGRX555:
+               return DRM_FORMAT_XRGB1555;
+       case DISPPLANE_BGRX565:
+               return DRM_FORMAT_RGB565;
+       default:
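+       /* fall through: unknown encodings decode as XRGB8888 */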
+       case DISPPLANE_BGRX888:
+               return DRM_FORMAT_XRGB8888;
+       case DISPPLANE_RGBX888:
+               return DRM_FORMAT_XBGR8888;
+       case DISPPLANE_BGRX101010:
+               return DRM_FORMAT_XRGB2101010;
+       case DISPPLANE_RGBX101010:
+               return DRM_FORMAT_XBGR2101010;
+       }
+}
+
+static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
+                                 struct intel_plane_config *plane_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_gem_object *obj = NULL;
+       struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+       u32 base = plane_config->base;
+
+       if (plane_config->size == 0)
+               return false;
+
+       obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
+                                                            plane_config->size);
+       if (!obj)
+               return false;
+
+       if (plane_config->tiled) {
+               obj->tiling_mode = I915_TILING_X;
+               obj->stride = crtc->base.fb->pitches[0];
+       }
+
+       mode_cmd.pixel_format = crtc->base.fb->pixel_format;
+       mode_cmd.width = crtc->base.fb->width;
+       mode_cmd.height = crtc->base.fb->height;
+       mode_cmd.pitches[0] = crtc->base.fb->pitches[0];
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.fb),
+                                  &mode_cmd, obj)) {
+               DRM_DEBUG_KMS("intel fb init failed\n");
+               goto out_unref_obj;
+       }
+
+       mutex_unlock(&dev->struct_mutex);
+
+       DRM_DEBUG_KMS("plane fb obj %p\n", obj);
+       return true;
+
+out_unref_obj:
+       drm_gem_object_unreference(&obj->base);
+       mutex_unlock(&dev->struct_mutex);
+       return false;
+}
+
+static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
+                                struct intel_plane_config *plane_config)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_crtc *c;
+       struct intel_crtc *i;
+       struct intel_framebuffer *fb;
+
+       if (!intel_crtc->base.fb)
+               return;
+
+       if (intel_alloc_plane_obj(intel_crtc, plane_config))
+               return;
+
+       kfree(intel_crtc->base.fb);
+       intel_crtc->base.fb = NULL;
+
+       /*
+        * Failed to alloc the obj, check to see if we should share
+        * an fb with another CRTC instead
+        */
+       list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+               i = to_intel_crtc(c);
+
+               if (c == &intel_crtc->base)
+                       continue;
+
+               if (!i->active || !c->fb)
+                       continue;
+
+               fb = to_intel_framebuffer(c->fb);
+               if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+                       drm_framebuffer_reference(c->fb);
+                       intel_crtc->base.fb = c->fb;
+                       break;
+               }
+       }
+}
+
 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                             int x, int y)
 {
@@ -2299,31 +2426,23 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
        return ret;
 }
 
-static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_master_private *master_priv;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       unsigned long flags;
+       bool pending;
 
-       if (!dev->primary->master)
-               return;
+       if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+           intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+               return false;
 
-       master_priv = dev->primary->master->driver_priv;
-       if (!master_priv->sarea_priv)
-               return;
+       spin_lock_irqsave(&dev->event_lock, flags);
+       pending = to_intel_crtc(crtc)->unpin_work != NULL;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
 
-       switch (intel_crtc->pipe) {
-       case 0:
-               master_priv->sarea_priv->pipeA_x = x;
-               master_priv->sarea_priv->pipeA_y = y;
-               break;
-       case 1:
-               master_priv->sarea_priv->pipeB_x = x;
-               master_priv->sarea_priv->pipeB_y = y;
-               break;
-       default:
-               break;
-       }
+       return pending;
 }
 
 static int
@@ -2336,6 +2455,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_framebuffer *old_fb;
        int ret;
 
+       if (intel_crtc_has_pending_flip(crtc)) {
+               DRM_ERROR("pipe is still busy with an old pageflip\n");
+               return -EBUSY;
+       }
+
        /* no fb bound */
        if (!fb) {
                DRM_ERROR("No FB bound\n");
@@ -2372,7 +2496,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
         * whether the platform allows pfit disable with pipe active, and only
         * then update the pipesrc and pfit state, even on the flip path.
         */
-       if (i915_fastboot) {
+       if (i915.fastboot) {
                const struct drm_display_mode *adjusted_mode =
                        &intel_crtc->config.adjusted_mode;
 
@@ -2413,8 +2537,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        intel_edp_psr_update(dev);
        mutex_unlock(&dev->struct_mutex);
 
-       intel_crtc_update_sarea_pos(crtc, x, y);
-
        return 0;
 }
 
@@ -2963,25 +3085,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
        udelay(100);
 }
 
-static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long flags;
-       bool pending;
-
-       if (i915_reset_in_progress(&dev_priv->gpu_error) ||
-           intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-               return false;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       pending = to_intel_crtc(crtc)->unpin_work != NULL;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-
-       return pending;
-}
-
 bool intel_has_pending_fb_unpin(struct drm_device *dev)
 {
        struct intel_crtc *crtc;
@@ -3587,8 +3690,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        intel_crtc_load_lut(crtc);
 
        intel_update_watermarks(crtc);
-       intel_enable_pipe(dev_priv, pipe,
-                         intel_crtc->config.has_pch_encoder, false);
+       intel_enable_pipe(intel_crtc);
        intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
@@ -3733,8 +3835,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        intel_ddi_enable_transcoder_func(crtc);
 
        intel_update_watermarks(crtc);
-       intel_enable_pipe(dev_priv, pipe,
-                         intel_crtc->config.has_pch_encoder, false);
+       intel_enable_pipe(intel_crtc);
 
        if (intel_crtc->config.has_pch_encoder)
                lpt_pch_enable(crtc);
@@ -3748,16 +3849,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
         * to change the workaround. */
        haswell_mode_set_planes_workaround(intel_crtc);
        haswell_crtc_enable_planes(crtc);
-
-       /*
-        * There seems to be a race in PCH platform hw (at least on some
-        * outputs) where an enabled pipe still completes any pageflip right
-        * away (as if the pipe is off) instead of waiting for vblank. As soon
-        * as the first vblank happened, everything works as expected. Hence just
-        * wait for one vblank before returning to avoid strange things
-        * happening.
-        */
-       intel_wait_for_vblank(dev, intel_crtc->pipe);
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -3972,6 +4063,117 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
 }
 
+#define for_each_power_domain(domain, mask)                            \
+       for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
+               if ((1 << (domain)) & (mask))
+
+enum intel_display_power_domain
+intel_display_port_power_domain(struct intel_encoder *intel_encoder)
+{
+       struct drm_device *dev = intel_encoder->base.dev;
+       struct intel_digital_port *intel_dig_port;
+
+       switch (intel_encoder->type) {
+       case INTEL_OUTPUT_UNKNOWN:
+               /* Only DDI platforms should ever use this output type */
+               WARN_ON_ONCE(!HAS_DDI(dev));
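+               /* fall through: DDI outputs are handled as digital ports */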
+       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_HDMI:
+       case INTEL_OUTPUT_EDP:
+               intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+               switch (intel_dig_port->port) {
+               case PORT_A:
+                       return POWER_DOMAIN_PORT_DDI_A_4_LANES;
+               case PORT_B:
+                       return POWER_DOMAIN_PORT_DDI_B_4_LANES;
+               case PORT_C:
+                       return POWER_DOMAIN_PORT_DDI_C_4_LANES;
+               case PORT_D:
+                       return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+               default:
+                       WARN_ON_ONCE(1);
+                       return POWER_DOMAIN_PORT_OTHER;
+               }
+       case INTEL_OUTPUT_ANALOG:
+               return POWER_DOMAIN_PORT_CRT;
+       case INTEL_OUTPUT_DSI:
+               return POWER_DOMAIN_PORT_DSI;
+       default:
+               return POWER_DOMAIN_PORT_OTHER;
+       }
+}
+
+static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct intel_encoder *intel_encoder;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
+       unsigned long mask;
+       enum transcoder transcoder;
+
+       transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
+
+       mask = BIT(POWER_DOMAIN_PIPE(pipe));
+       mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+       if (pfit_enabled)
+               mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+       for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+               mask |= BIT(intel_display_port_power_domain(intel_encoder));
+
+       return mask;
+}
+
+void intel_display_set_init_power(struct drm_i915_private *dev_priv,
+                                 bool enable)
+{
+       if (dev_priv->power_domains.init_power_on == enable)
+               return;
+
+       if (enable)
+               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+       else
+               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+       dev_priv->power_domains.init_power_on = enable;
+}
+
+static void modeset_update_crtc_power_domains(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
+       struct intel_crtc *crtc;
+
+       /*
+        * First get all needed power domains, then put all unneeded, to avoid
+        * any unnecessary toggling of the power wells.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+               enum intel_display_power_domain domain;
+
+               if (!crtc->base.enabled)
+                       continue;
+
+               pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
+
+               for_each_power_domain(domain, pipe_domains[crtc->pipe])
+                       intel_display_power_get(dev_priv, domain);
+       }
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+               enum intel_display_power_domain domain;
+
+               for_each_power_domain(domain, crtc->enabled_power_domains)
+                       intel_display_power_put(dev_priv, domain);
+
+               crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+       }
+
+       intel_display_set_init_power(dev_priv, false);
+}
+
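Why the get-before-put order above matters, as a standalone sketch with a hypothetical refcount (not the driver's actual power-well bookkeeping): a well referenced by both the old and the new state must never bounce through zero, which would power-cycle the hardware:

static int well_refcount;       /* hypothetical shared power-well refcount */

static void well_get(void) { well_refcount++; }

static void well_put(void)
{
        if (--well_refcount == 0)
                ;               /* the well would power off here */
}

static void update_sketch(void)
{
        well_get();             /* new state grabs its reference: 1 -> 2 */
        well_put();             /* old reference dropped: 2 -> 1, never 0 */
}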
 int valleyview_get_vco(struct drm_i915_private *dev_priv)
 {
        int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@ -4088,9 +4290,8 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
        /* Looks like the 200MHz CDclk freq doesn't work on some configs */
 }
 
-static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
-                                unsigned modeset_pipes,
-                                struct intel_crtc_config *pipe_config)
+/* compute the max pixel clock for the new configuration */
+static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
        struct intel_crtc *intel_crtc;
@@ -4098,31 +4299,26 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
 
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head) {
-               if (modeset_pipes & (1 << intel_crtc->pipe))
+               if (intel_crtc->new_enabled)
                        max_pixclk = max(max_pixclk,
-                                        pipe_config->adjusted_mode.crtc_clock);
-               else if (intel_crtc->base.enabled)
-                       max_pixclk = max(max_pixclk,
-                                        intel_crtc->config.adjusted_mode.crtc_clock);
+                                        intel_crtc->new_config->adjusted_mode.crtc_clock);
        }
 
        return max_pixclk;
 }
 
 static void valleyview_modeset_global_pipes(struct drm_device *dev,
-                                           unsigned *prepare_pipes,
-                                           unsigned modeset_pipes,
-                                           struct intel_crtc_config *pipe_config)
+                                           unsigned *prepare_pipes)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc;
-       int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
-                                              pipe_config);
+       int max_pixclk = intel_mode_max_pixclk(dev_priv);
        int cur_cdclk = valleyview_cur_cdclk(dev_priv);
 
        if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
                return;
 
+       /* disable/enable all currently active pipes while we change cdclk */
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head)
                if (intel_crtc->base.enabled)
@@ -4132,12 +4328,13 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
 static void valleyview_modeset_global_resources(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
+       int max_pixclk = intel_mode_max_pixclk(dev_priv);
        int cur_cdclk = valleyview_cur_cdclk(dev_priv);
        int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
 
        if (req_cdclk != cur_cdclk)
                valleyview_set_cdclk(dev, req_cdclk);
+       modeset_update_crtc_power_domains(dev);
 }
 
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -4175,7 +4372,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        intel_crtc_load_lut(crtc);
 
        intel_update_watermarks(crtc);
-       intel_enable_pipe(dev_priv, pipe, false, is_dsi);
+       intel_enable_pipe(intel_crtc);
+       intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
        intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
@@ -4213,7 +4411,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        intel_crtc_load_lut(crtc);
 
        intel_update_watermarks(crtc);
-       intel_enable_pipe(dev_priv, pipe, false, false);
+       intel_enable_pipe(intel_crtc);
+       intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
        intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        /* The fixup needs to happen before cursor is enabled */
@@ -4272,6 +4471,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        intel_disable_planes(crtc);
        intel_disable_primary_plane(dev_priv, plane, pipe);
 
+       intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
        intel_disable_pipe(dev_priv, pipe);
 
        i9xx_pfit_disable(intel_crtc);
@@ -4583,7 +4783,7 @@ retry:
 static void hsw_compute_ips_config(struct intel_crtc *crtc,
                                   struct intel_crtc_config *pipe_config)
 {
-       pipe_config->ips_enabled = i915_enable_ips &&
+       pipe_config->ips_enabled = i915.enable_ips &&
                                   hsw_crtc_supports_ips(crtc) &&
                                   pipe_config->pipe_bpp <= 24;
 }
@@ -4784,8 +4984,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes,
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 {
-       if (i915_panel_use_ssc >= 0)
-               return i915_panel_use_ssc != 0;
+       if (i915.panel_use_ssc >= 0)
+               return i915.panel_use_ssc != 0;
        return dev_priv->vbt.lvds_use_ssc
                && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
@@ -4844,7 +5044,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
 
        crtc->lowfreq_avail = false;
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
-           reduced_clock && i915_powersave) {
+           reduced_clock && i915.powersave) {
                I915_WRITE(FP1(pipe), fp2);
                crtc->config.dpll_hw_state.fp1 = fp2;
                crtc->lowfreq_avail = true;
@@ -5259,25 +5459,23 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
        pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
 }
 
-static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
-                                            struct intel_crtc_config *pipe_config)
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+                                struct intel_crtc_config *pipe_config)
 {
-       struct drm_crtc *crtc = &intel_crtc->base;
-
-       crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
-       crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
-       crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
-       crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
+       mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
+       mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
+       mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
+       mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
 
-       crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
-       crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
-       crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
-       crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
+       mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
+       mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
+       mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
+       mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
 
-       crtc->mode.flags = pipe_config->adjusted_mode.flags;
+       mode->flags = pipe_config->adjusted_mode.flags;
 
-       crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
-       crtc->mode.flags |= pipe_config->adjusted_mode.flags;
+       mode->clock = pipe_config->adjusted_mode.crtc_clock;
+       mode->flags |= pipe_config->adjusted_mode.flags;
 }
 
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -5512,6 +5710,67 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
        pipe_config->port_clock = clock.dot / 5;
 }
 
+static void i9xx_get_plane_config(struct intel_crtc *crtc,
+                                 struct intel_plane_config *plane_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val, base, offset;
+       int pipe = crtc->pipe, plane = crtc->plane;
+       int fourcc, pixel_format;
+       int aligned_height;
+
+       crtc->base.fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
+       if (!crtc->base.fb) {
+               DRM_DEBUG_KMS("failed to alloc fb\n");
+               return;
+       }
+
+       val = I915_READ(DSPCNTR(plane));
+
+       if (INTEL_INFO(dev)->gen >= 4)
+               if (val & DISPPLANE_TILED)
+                       plane_config->tiled = true;
+
+       pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+       fourcc = intel_format_to_fourcc(pixel_format);
+       crtc->base.fb->pixel_format = fourcc;
+       crtc->base.fb->bits_per_pixel =
+               drm_format_plane_cpp(fourcc, 0) * 8;
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               if (plane_config->tiled)
+                       offset = I915_READ(DSPTILEOFF(plane));
+               else
+                       offset = I915_READ(DSPLINOFF(plane));
+               base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+       } else {
+               base = I915_READ(DSPADDR(plane));
+       }
+       plane_config->base = base;
+
+       val = I915_READ(PIPESRC(pipe));
+       crtc->base.fb->width = ((val >> 16) & 0xfff) + 1;
+       crtc->base.fb->height = ((val >> 0) & 0xfff) + 1;
+
+       val = I915_READ(DSPSTRIDE(pipe));
+       crtc->base.fb->pitches[0] = val & 0xffffff80;
+
+       aligned_height = intel_align_height(dev, crtc->base.fb->height,
+                                           plane_config->tiled);
+
+       plane_config->size = ALIGN(crtc->base.fb->pitches[0] *
+                                  aligned_height, PAGE_SIZE);
+
+       DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+                     pipe, plane, crtc->base.fb->width,
+                     crtc->base.fb->height,
+                     crtc->base.fb->bits_per_pixel, base,
+                     crtc->base.fb->pitches[0],
+                     plane_config->size);
+}
+
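The PIPESRC decode above packs (width - 1) into bits 27:16 and (height - 1) into bits 11:0. A worked example with an assumed 1280x720 source:

/* assumed 1280x720 source: PIPESRC reads back as 0x04ff02cf */
u32 val = (1279 << 16) | 719;
int w = ((val >> 16) & 0xfff) + 1;      /* 0x4ff + 1 == 1280 */
int h = ((val >>  0) & 0xfff) + 1;      /* 0x2cf + 1 ==  720 */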
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_config *pipe_config)
 {
@@ -5519,6 +5778,10 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
+       if (!intel_display_power_enabled(dev_priv,
+                                        POWER_DOMAIN_PIPE(crtc->pipe)))
+               return false;
+
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
@@ -6180,7 +6443,7 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
         * is 2.5%; use 5% for safety's sake.
         */
        u32 bps = target_clock * bpp * 21 / 20;
-       return bps / (link_bw * 8) + 1;
+       return DIV_ROUND_UP(bps, link_bw * 8);
 }
 
 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
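The DIV_ROUND_UP() change above fixes an off-by-one: the old expression added a lane even when the required bandwidth divided evenly into the link capacity. A worked example with assumed numbers:

/* assumed: exactly two lanes' worth of bandwidth is needed */
u32 link_bw = 270000, bps = 2 * link_bw * 8;
int old_lanes = bps / (link_bw * 8) + 1;        /* 3: one lane too many */
int new_lanes = DIV_ROUND_UP(bps, link_bw * 8); /* 2: rounds up only on a remainder */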
@@ -6348,7 +6611,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        if (intel_crtc->config.has_dp_encoder)
                intel_dp_set_m_n(intel_crtc);
 
-       if (is_lvds && has_reduced_clock && i915_powersave)
+       if (is_lvds && has_reduced_clock && i915.powersave)
                intel_crtc->lowfreq_avail = true;
        else
                intel_crtc->lowfreq_avail = false;
@@ -6455,25 +6718,85 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
        }
 }
 
-static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
-                                    struct intel_crtc_config *pipe_config)
+static void ironlake_get_plane_config(struct intel_crtc *crtc,
+                                     struct intel_plane_config *plane_config)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t tmp;
+       u32 val, base, offset;
+       int pipe = crtc->pipe, plane = crtc->plane;
+       int fourcc, pixel_format;
+       int aligned_height;
+
+       crtc->base.fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
+       if (!crtc->base.fb) {
+               DRM_DEBUG_KMS("failed to alloc fb\n");
+               return;
+       }
 
-       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
-       pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+       val = I915_READ(DSPCNTR(plane));
 
-       tmp = I915_READ(PIPECONF(crtc->pipe));
-       if (!(tmp & PIPECONF_ENABLE))
-               return false;
+       if (INTEL_INFO(dev)->gen >= 4)
+               if (val & DISPPLANE_TILED)
+                       plane_config->tiled = true;
 
-       switch (tmp & PIPECONF_BPC_MASK) {
-       case PIPECONF_6BPC:
-               pipe_config->pipe_bpp = 18;
-               break;
-       case PIPECONF_8BPC:
+       pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+       fourcc = intel_format_to_fourcc(pixel_format);
+       crtc->base.fb->pixel_format = fourcc;
+       crtc->base.fb->bits_per_pixel =
+               drm_format_plane_cpp(fourcc, 0) * 8;
+
+       base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+               offset = I915_READ(DSPOFFSET(plane));
+       } else {
+               if (plane_config->tiled)
+                       offset = I915_READ(DSPTILEOFF(plane));
+               else
+                       offset = I915_READ(DSPLINOFF(plane));
+       }
+       plane_config->base = base;
+
+       val = I915_READ(PIPESRC(pipe));
+       crtc->base.fb->width = ((val >> 16) & 0xfff) + 1;
+       crtc->base.fb->height = ((val >> 0) & 0xfff) + 1;
+
+       val = I915_READ(DSPSTRIDE(pipe));
+       crtc->base.fb->pitches[0] = val & 0xffffff80;
+
+       aligned_height = intel_align_height(dev, crtc->base.fb->height,
+                                           plane_config->tiled);
+
+       plane_config->size = ALIGN(crtc->base.fb->pitches[0] *
+                                  aligned_height, PAGE_SIZE);
+
+       DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+                     pipe, plane, crtc->base.fb->width,
+                     crtc->base.fb->height,
+                     crtc->base.fb->bits_per_pixel, base,
+                     crtc->base.fb->pitches[0],
+                     plane_config->size);
+}
+
+static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
+                                    struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t tmp;
+
+       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+       pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+       tmp = I915_READ(PIPECONF(crtc->pipe));
+       if (!(tmp & PIPECONF_ENABLE))
+               return false;
+
+       switch (tmp & PIPECONF_BPC_MASK) {
+       case PIPECONF_6BPC:
+               pipe_config->pipe_bpp = 18;
+               break;
+       case PIPECONF_8BPC:
                pipe_config->pipe_bpp = 24;
                break;
        case PIPECONF_10BPC:
@@ -6716,7 +7039,7 @@ static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
                return;
 
        schedule_delayed_work(&dev_priv->pc8.enable_work,
-                             msecs_to_jiffies(i915_pc8_timeout));
+                             msecs_to_jiffies(i915.pc8_timeout));
 }
 
 static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
@@ -6815,7 +7138,7 @@ static void hsw_update_package_c8(struct drm_device *dev)
        if (!HAS_PC8(dev_priv->dev))
                return;
 
-       if (!i915_enable_pc8)
+       if (!i915.enable_pc8)
                return;
 
        mutex_lock(&dev_priv->pc8.lock);
@@ -6836,105 +7159,9 @@ done:
        mutex_unlock(&dev_priv->pc8.lock);
 }
 
-static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
-{
-       if (!HAS_PC8(dev_priv->dev))
-               return;
-
-       mutex_lock(&dev_priv->pc8.lock);
-       if (!dev_priv->pc8.gpu_idle) {
-               dev_priv->pc8.gpu_idle = true;
-               __hsw_enable_package_c8(dev_priv);
-       }
-       mutex_unlock(&dev_priv->pc8.lock);
-}
-
-static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
-{
-       if (!HAS_PC8(dev_priv->dev))
-               return;
-
-       mutex_lock(&dev_priv->pc8.lock);
-       if (dev_priv->pc8.gpu_idle) {
-               dev_priv->pc8.gpu_idle = false;
-               __hsw_disable_package_c8(dev_priv);
-       }
-       mutex_unlock(&dev_priv->pc8.lock);
-}
-
-#define for_each_power_domain(domain, mask)                            \
-       for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
-               if ((1 << (domain)) & (mask))
-
-static unsigned long get_pipe_power_domains(struct drm_device *dev,
-                                           enum pipe pipe, bool pfit_enabled)
-{
-       unsigned long mask;
-       enum transcoder transcoder;
-
-       transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
-
-       mask = BIT(POWER_DOMAIN_PIPE(pipe));
-       mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
-       if (pfit_enabled)
-               mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
-
-       return mask;
-}
-
-void intel_display_set_init_power(struct drm_device *dev, bool enable)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (dev_priv->power_domains.init_power_on == enable)
-               return;
-
-       if (enable)
-               intel_display_power_get(dev, POWER_DOMAIN_INIT);
-       else
-               intel_display_power_put(dev, POWER_DOMAIN_INIT);
-
-       dev_priv->power_domains.init_power_on = enable;
-}
-
-static void modeset_update_power_wells(struct drm_device *dev)
-{
-       unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
-       struct intel_crtc *crtc;
-
-       /*
-        * First get all needed power domains, then put all unneeded, to avoid
-        * any unnecessary toggling of the power wells.
-        */
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
-               enum intel_display_power_domain domain;
-
-               if (!crtc->base.enabled)
-                       continue;
-
-               pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
-                                               crtc->pipe,
-                                               crtc->config.pch_pfit.enabled);
-
-               for_each_power_domain(domain, pipe_domains[crtc->pipe])
-                       intel_display_power_get(dev, domain);
-       }
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
-               enum intel_display_power_domain domain;
-
-               for_each_power_domain(domain, crtc->enabled_power_domains)
-                       intel_display_power_put(dev, domain);
-
-               crtc->enabled_power_domains = pipe_domains[crtc->pipe];
-       }
-
-       intel_display_set_init_power(dev, false);
-}
-
 static void haswell_modeset_global_resources(struct drm_device *dev)
 {
-       modeset_update_power_wells(dev);
+       modeset_update_crtc_power_domains(dev);
        hsw_update_package_c8(dev);
 }
 
@@ -6985,6 +7212,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        enum intel_display_power_domain pfit_domain;
        uint32_t tmp;
 
+       if (!intel_display_power_enabled(dev_priv,
+                                        POWER_DOMAIN_PIPE(crtc->pipe)))
+               return false;
+
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
@@ -7010,7 +7241,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                        pipe_config->cpu_transcoder = TRANSCODER_EDP;
        }
 
-       if (!intel_display_power_enabled(dev,
+       if (!intel_display_power_enabled(dev_priv,
                        POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
                return false;
 
@@ -7038,7 +7269,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        intel_get_pipe_timings(crtc, pipe_config);
 
        pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-       if (intel_display_power_enabled(dev, pfit_domain))
+       if (intel_display_power_enabled(dev_priv, pfit_domain))
                ironlake_get_pfit_config(crtc, pipe_config);
 
        if (IS_HASWELL(dev))
@@ -7573,18 +7804,18 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                return -ENOENT;
 
        if (obj->base.size < width * height * 4) {
-               DRM_ERROR("buffer is to small\n");
+               DRM_DEBUG_KMS("buffer is too small\n");
                ret = -ENOMEM;
                goto fail;
        }
 
        /* we only need to pin inside GTT if cursor is non-phy */
        mutex_lock(&dev->struct_mutex);
-       if (!dev_priv->info->cursor_needs_physical) {
+       if (!INTEL_INFO(dev)->cursor_needs_physical) {
                unsigned alignment;
 
                if (obj->tiling_mode) {
-                       DRM_ERROR("cursor cannot be tiled\n");
+                       DRM_DEBUG_KMS("cursor cannot be tiled\n");
                        ret = -EINVAL;
                        goto fail_locked;
                }
@@ -7600,13 +7831,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
                ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
                if (ret) {
-                       DRM_ERROR("failed to move cursor bo into the GTT\n");
+                       DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
                        goto fail_locked;
                }
 
                ret = i915_gem_object_put_fence(obj);
                if (ret) {
-                       DRM_ERROR("failed to release fence for cursor");
+                       DRM_DEBUG_KMS("failed to release fence for cursor\n");
                        goto fail_unpin;
                }
 
@@ -7617,7 +7848,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                                                  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
                                                  align);
                if (ret) {
-                       DRM_ERROR("failed to attach phys object\n");
+                       DRM_DEBUG_KMS("failed to attach phys object\n");
                        goto fail_locked;
                }
                addr = obj->phys_obj->handle->busaddr;
@@ -7628,7 +7859,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
  finish:
        if (intel_crtc->cursor_bo) {
-               if (dev_priv->info->cursor_needs_physical) {
+               if (INTEL_INFO(dev)->cursor_needs_physical) {
                        if (intel_crtc->cursor_bo != obj)
                                i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
                } else
@@ -7690,10 +7921,10 @@ static struct drm_display_mode load_detect_mode = {
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
 };
 
-static struct drm_framebuffer *
-intel_framebuffer_create(struct drm_device *dev,
-                        struct drm_mode_fb_cmd2 *mode_cmd,
-                        struct drm_i915_gem_object *obj)
+struct drm_framebuffer *
+__intel_framebuffer_create(struct drm_device *dev,
+                          struct drm_mode_fb_cmd2 *mode_cmd,
+                          struct drm_i915_gem_object *obj)
 {
        struct intel_framebuffer *intel_fb;
        int ret;
@@ -7704,12 +7935,7 @@ intel_framebuffer_create(struct drm_device *dev,
                return ERR_PTR(-ENOMEM);
        }
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               goto err;
-
        ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
-       mutex_unlock(&dev->struct_mutex);
        if (ret)
                goto err;
 
@@ -7721,6 +7947,23 @@ err:
        return ERR_PTR(ret);
 }
 
+static struct drm_framebuffer *
+intel_framebuffer_create(struct drm_device *dev,
+                        struct drm_mode_fb_cmd2 *mode_cmd,
+                        struct drm_i915_gem_object *obj)
+{
+       struct drm_framebuffer *fb;
+       int ret;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ERR_PTR(ret);
+       fb = __intel_framebuffer_create(dev, mode_cmd, obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return fb;
+}
+
 static u32
 intel_framebuffer_pitch_for_width(int width, int bpp)
 {
@@ -7766,14 +8009,16 @@ mode_fits_in_fbdev(struct drm_device *dev,
        struct drm_i915_gem_object *obj;
        struct drm_framebuffer *fb;
 
-       if (dev_priv->fbdev == NULL)
+       if (!dev_priv->fbdev)
                return NULL;
 
-       obj = dev_priv->fbdev->ifb.obj;
-       if (obj == NULL)
+       if (!dev_priv->fbdev->fb)
                return NULL;
 
-       fb = &dev_priv->fbdev->ifb.base;
+       obj = dev_priv->fbdev->fb->obj;
+       BUG_ON(!obj);
+
+       fb = &dev_priv->fbdev->fb->base;
        if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
                                                               fb->bits_per_pixel))
                return NULL;
@@ -7855,6 +8100,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
        to_intel_connector(connector)->new_encoder = intel_encoder;
 
        intel_crtc = to_intel_crtc(crtc);
+       intel_crtc->new_enabled = true;
+       intel_crtc->new_config = &intel_crtc->config;
        old->dpms_mode = connector->dpms;
        old->load_detect_temp = true;
        old->release_fb = NULL;
@@ -7878,21 +8125,28 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
                DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
-               mutex_unlock(&crtc->mutex);
-               return false;
+               goto fail;
        }
 
        if (intel_set_mode(crtc, mode, 0, 0, fb)) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                if (old->release_fb)
                        old->release_fb->funcs->destroy(old->release_fb);
-               mutex_unlock(&crtc->mutex);
-               return false;
+               goto fail;
        }
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev, intel_crtc->pipe);
        return true;
+
+ fail:
+       intel_crtc->new_enabled = crtc->enabled;
+       if (intel_crtc->new_enabled)
+               intel_crtc->new_config = &intel_crtc->config;
+       else
+               intel_crtc->new_config = NULL;
+       mutex_unlock(&crtc->mutex);
+       return false;
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -7902,6 +8156,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                intel_attached_encoder(connector);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = encoder->crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, drm_get_connector_name(connector),
@@ -7910,6 +8165,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
        if (old->load_detect_temp) {
                to_intel_connector(connector)->new_encoder = NULL;
                intel_encoder->new_crtc = NULL;
+               intel_crtc->new_enabled = false;
+               intel_crtc->new_config = NULL;
                intel_set_mode(crtc, NULL, 0, 0, NULL);
 
                if (old->release_fb) {
@@ -8190,8 +8447,12 @@ void intel_mark_busy(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       hsw_package_c8_gpu_busy(dev_priv);
+       if (dev_priv->mm.busy)
+               return;
+
+       hsw_disable_package_c8(dev_priv);
        i915_update_gfx_val(dev_priv);
+       dev_priv->mm.busy = true;
 }
 
 void intel_mark_idle(struct drm_device *dev)
@@ -8199,11 +8460,14 @@ void intel_mark_idle(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
 
-       hsw_package_c8_gpu_idle(dev_priv);
-
-       if (!i915_powersave)
+       if (!dev_priv->mm.busy)
                return;
 
+       dev_priv->mm.busy = false;
+
+       if (!i915.powersave)
+               goto out;
+
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                if (!crtc->fb)
                        continue;
@@ -8211,8 +8475,11 @@ void intel_mark_idle(struct drm_device *dev)
                intel_decrease_pllclock(crtc);
        }
 
-       if (dev_priv->info->gen >= 6)
+       if (INTEL_INFO(dev)->gen >= 6)
                gen6_rps_idle(dev->dev_private);
+
+out:
+       hsw_enable_package_c8(dev_priv);
 }
 
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -8221,7 +8488,7 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        struct drm_crtc *crtc;
 
-       if (!i915_powersave)
+       if (!i915.powersave)
                return;
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -8676,6 +8943,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
             fb->pitches[0] != crtc->fb->pitches[0]))
                return -EINVAL;
 
+       if (i915_terminally_wedged(&dev_priv->gpu_error))
+               goto out_hang;
+
        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;
@@ -8750,6 +9020,13 @@ cleanup:
 free_work:
        kfree(work);
 
+       if (ret == -EIO) {
+out_hang:
+               intel_crtc_wait_for_pending_flips(crtc);
+               ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
+               if (ret == 0 && event)
+                       drm_send_vblank_event(dev, intel_crtc->pipe, event);
+       }
        return ret;
 }
 
@@ -8766,6 +9043,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
  */
 static void intel_modeset_update_staged_output_state(struct drm_device *dev)
 {
+       struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
 
@@ -8780,6 +9058,16 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
                encoder->new_crtc =
                        to_intel_crtc(encoder->base.crtc);
        }
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               crtc->new_enabled = crtc->base.enabled;
+
+               if (crtc->new_enabled)
+                       crtc->new_config = &crtc->config;
+               else
+                       crtc->new_config = NULL;
+       }
 }
 
 /**
@@ -8789,6 +9077,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
  */
 static void intel_modeset_commit_output_state(struct drm_device *dev)
 {
+       struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
 
@@ -8801,6 +9090,11 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
                            base.head) {
                encoder->base.crtc = &encoder->new_crtc->base;
        }
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               crtc->base.enabled = crtc->new_enabled;
+       }
 }
 
 static void
@@ -8941,23 +9235,47 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
        DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 }
 
-static bool check_encoder_cloning(struct drm_crtc *crtc)
+static bool encoders_cloneable(const struct intel_encoder *a,
+                              const struct intel_encoder *b)
+{
+       /* masks could be asymmetric, so check both ways */
+       return a == b || (a->cloneable & (1 << b->type) &&
+                         b->cloneable & (1 << a->type));
+}
+
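encoders_cloneable() is what the intel_ddi.c change from "cloneable = false" to "cloneable = 0" feeds into: the field is now a bitmask of INTEL_OUTPUT_* types the encoder may share a pipe with, checked in both directions. A sketch with assumed mask values:

/* sketch, assumed masks: HDMI and analog allow cloning each other */
hdmi->cloneable = 1 << INTEL_OUTPUT_ANALOG;
crt->cloneable  = 1 << INTEL_OUTPUT_HDMI;
/* both bits set -> encoders_cloneable(hdmi, crt) is true; a mask of 0,
 * as DDI now uses, permits sharing a pipe only with itself (a == b) */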
+static bool check_single_encoder_cloning(struct intel_crtc *crtc,
+                                        struct intel_encoder *encoder)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct intel_encoder *source_encoder;
+
+       list_for_each_entry(source_encoder,
+                           &dev->mode_config.encoder_list, base.head) {
+               if (source_encoder->new_crtc != crtc)
+                       continue;
+
+               if (!encoders_cloneable(encoder, source_encoder))
+                       return false;
+       }
+
+       return true;
+}
+
+static bool check_encoder_cloning(struct intel_crtc *crtc)
 {
-       int num_encoders = 0;
-       bool uncloneable_encoders = false;
+       struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *encoder;
 
-       list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
-                           base.head) {
-               if (&encoder->new_crtc->base != crtc)
+       list_for_each_entry(encoder,
+                           &dev->mode_config.encoder_list, base.head) {
+               if (encoder->new_crtc != crtc)
                        continue;
 
-               num_encoders++;
-               if (!encoder->cloneable)
-                       uncloneable_encoders = true;
+               if (!check_single_encoder_cloning(crtc, encoder))
+                       return false;
        }
 
-       return !(num_encoders > 1 && uncloneable_encoders);
+       return true;
 }
 
 static struct intel_crtc_config *
@@ -8971,7 +9289,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
        int plane_bpp, ret = -EINVAL;
        bool retry = true;
 
-       if (!check_encoder_cloning(crtc)) {
+       if (!check_encoder_cloning(to_intel_crtc(crtc))) {
                DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
                return ERR_PTR(-EINVAL);
        }
@@ -9127,29 +9445,22 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
                        *prepare_pipes |= 1 << encoder->new_crtc->pipe;
        }
 
-       /* Check for any pipes that will be fully disabled ... */
+       /* Check for pipes that will be enabled/disabled ... */
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head) {
-               bool used = false;
-
-               /* Don't try to disable disabled crtcs. */
-               if (!intel_crtc->base.enabled)
+               if (intel_crtc->base.enabled == intel_crtc->new_enabled)
                        continue;
 
-               list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-                                   base.head) {
-                       if (encoder->new_crtc == intel_crtc)
-                               used = true;
-               }
-
-               if (!used)
+               if (!intel_crtc->new_enabled)
                        *disable_pipes |= 1 << intel_crtc->pipe;
+               else
+                       *prepare_pipes |= 1 << intel_crtc->pipe;
        }
 
 
        /* set_mode is also used to update properties on live display pipes. */
        intel_crtc = to_intel_crtc(crtc);
-       if (crtc->enabled)
+       if (intel_crtc->new_enabled)
                *prepare_pipes |= 1 << intel_crtc->pipe;
 
        /*
@@ -9208,10 +9519,13 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
 
        intel_modeset_commit_output_state(dev);
 
-       /* Update computed state. */
+       /* Double check state. */
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head) {
-               intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+               WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
+               WARN_ON(intel_crtc->new_config &&
+                       intel_crtc->new_config != &intel_crtc->config);
+               WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
        }
 
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -9380,10 +9694,8 @@ intel_pipe_config_compare(struct drm_device *dev,
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);
 
-       if (!HAS_DDI(dev)) {
-               PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
-               PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
-       }
+       PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+       PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
 
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
@@ -9643,6 +9955,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
                }
                intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
                                       "[modeset]");
+               to_intel_crtc(crtc)->new_config = pipe_config;
        }
 
        /*
@@ -9653,8 +9966,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
         * adjusted_mode bits in the crtc directly.
         */
        if (IS_VALLEYVIEW(dev)) {
-               valleyview_modeset_global_pipes(dev, &prepare_pipes,
-                                               modeset_pipes, pipe_config);
+               valleyview_modeset_global_pipes(dev, &prepare_pipes);
 
                /* may have added more to prepare_pipes than we should */
                prepare_pipes &= ~disable_pipes;
@@ -9676,6 +9988,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
                /* mode_set/enable/disable functions rely on a correct pipe
                 * config. */
                to_intel_crtc(crtc)->config = *pipe_config;
+               to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
 
                /*
                 * Calculate and store various constants which
@@ -9746,16 +10059,24 @@ static void intel_set_config_free(struct intel_set_config *config)
 
        kfree(config->save_connector_encoders);
        kfree(config->save_encoder_crtcs);
+       kfree(config->save_crtc_enabled);
        kfree(config);
 }
 
 static int intel_set_config_save_state(struct drm_device *dev,
                                       struct intel_set_config *config)
 {
+       struct drm_crtc *crtc;
        struct drm_encoder *encoder;
        struct drm_connector *connector;
        int count;
 
+       config->save_crtc_enabled =
+               kcalloc(dev->mode_config.num_crtc,
+                       sizeof(bool), GFP_KERNEL);
+       if (!config->save_crtc_enabled)
+               return -ENOMEM;
+
        config->save_encoder_crtcs =
                kcalloc(dev->mode_config.num_encoder,
                        sizeof(struct drm_crtc *), GFP_KERNEL);
@@ -9772,6 +10093,11 @@ static int intel_set_config_save_state(struct drm_device *dev,
         * Should anything bad happen only the expected state is
         * restored, not the drivers personal bookkeeping.
         */
+       count = 0;
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               config->save_crtc_enabled[count++] = crtc->enabled;
+       }
+
        count = 0;
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                config->save_encoder_crtcs[count++] = encoder->crtc;
@@ -9788,10 +10114,21 @@ static int intel_set_config_save_state(struct drm_device *dev,
 static void intel_set_config_restore_state(struct drm_device *dev,
                                           struct intel_set_config *config)
 {
+       struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        int count;
 
+       count = 0;
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+               crtc->new_enabled = config->save_crtc_enabled[count++];
+
+               if (crtc->new_enabled)
+                       crtc->new_config = &crtc->config;
+               else
+                       crtc->new_config = NULL;
+       }
+
        count = 0;
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                encoder->new_crtc =
@@ -9840,7 +10177,7 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
                        struct intel_crtc *intel_crtc =
                                to_intel_crtc(set->crtc);
 
-                       if (intel_crtc->active && i915_fastboot) {
+                       if (intel_crtc->active && i915.fastboot) {
                                DRM_DEBUG_KMS("crtc has no fb, will flip\n");
                                config->fb_changed = true;
                        } else {
@@ -9876,9 +10213,9 @@ intel_modeset_stage_output_state(struct drm_device *dev,
                                 struct drm_mode_set *set,
                                 struct intel_set_config *config)
 {
-       struct drm_crtc *new_crtc;
        struct intel_connector *connector;
        struct intel_encoder *encoder;
+       struct intel_crtc *crtc;
        int ro;
 
        /* The upper layers ensure that we either disable a crtc or have a list
@@ -9921,6 +10258,8 @@ intel_modeset_stage_output_state(struct drm_device *dev,
        /* Update crtc of enabled connectors. */
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
+               struct drm_crtc *new_crtc;
+
                if (!connector->new_encoder)
                        continue;
 
@@ -9971,9 +10310,58 @@ intel_modeset_stage_output_state(struct drm_device *dev,
        }
        /* Now we've also updated encoder->new_crtc for all encoders. */
 
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               crtc->new_enabled = false;
+
+               list_for_each_entry(encoder,
+                                   &dev->mode_config.encoder_list,
+                                   base.head) {
+                       if (encoder->new_crtc == crtc) {
+                               crtc->new_enabled = true;
+                               break;
+                       }
+               }
+
+               if (crtc->new_enabled != crtc->base.enabled) {
+                       DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
+                                     crtc->new_enabled ? "en" : "dis");
+                       config->mode_changed = true;
+               }
+
+               if (crtc->new_enabled)
+                       crtc->new_config = &crtc->config;
+               else
+                       crtc->new_config = NULL;
+       }
+
        return 0;
 }
 
+static void disable_crtc_nofb(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct intel_encoder *encoder;
+       struct intel_connector *connector;
+
+       DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
+                     pipe_name(crtc->pipe));
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+               if (connector->new_encoder &&
+                   connector->new_encoder->new_crtc == crtc)
+                       connector->new_encoder = NULL;
+       }
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+               if (encoder->new_crtc == crtc)
+                       encoder->new_crtc = NULL;
+       }
+
+       crtc->new_enabled = false;
+       crtc->new_config = NULL;
+}
+
 static int intel_crtc_set_config(struct drm_mode_set *set)
 {
        struct drm_device *dev;
@@ -10040,7 +10428,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
                 * flipping, so increasing its cost here shouldn't be a big
                 * deal).
                 */
-               if (i915_fastboot && ret == 0)
+               if (i915.fastboot && ret == 0)
                        intel_modeset_check_state(set->crtc->dev);
        }
 
@@ -10050,6 +10438,15 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 fail:
                intel_set_config_restore_state(dev, config);
 
+               /*
+                * HACK: if the pipe was on, but we didn't have a framebuffer,
+                * force the pipe off to avoid oopsing in the modeset code
+                * due to fb==NULL. This should only happen during boot since
+                * we don't yet reconstruct the FB from the hardware state.
+                */
+               if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
+                       disable_crtc_nofb(to_intel_crtc(save_set.crtc));
+
                /* Try to restore the config */
                if (config->mode_changed &&
                    intel_set_mode(save_set.crtc, save_set.mode,
@@ -10255,12 +10652,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
 
        list_for_each_entry(source_encoder,
                            &dev->mode_config.encoder_list, base.head) {
-
-               if (encoder == source_encoder)
-                       index_mask |= (1 << entry);
-
-               /* Intel hw has only one MUX where enocoders could be cloned. */
-               if (encoder->cloneable && source_encoder->cloneable)
+               if (encoders_cloneable(encoder, source_encoder))
                        index_mask |= (1 << entry);
 
                entry++;
@@ -10279,8 +10671,7 @@ static bool has_edp_a(struct drm_device *dev)
        if ((I915_READ(DP_A) & DP_DETECTED) == 0)
                return false;
 
-       if (IS_GEN5(dev) &&
-           (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
+       if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
                return false;
 
        return true;
@@ -10433,18 +10824,13 @@ static void intel_setup_outputs(struct drm_device *dev)
        drm_helper_move_panel_connectors_to_head(dev);
 }
 
-void intel_framebuffer_fini(struct intel_framebuffer *fb)
-{
-       drm_framebuffer_cleanup(&fb->base);
-       WARN_ON(!fb->obj->framebuffer_references--);
-       drm_gem_object_unreference_unlocked(&fb->obj->base);
-}
-
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
-       intel_framebuffer_fini(intel_fb);
+       drm_framebuffer_cleanup(fb);
+       WARN_ON(!intel_fb->obj->framebuffer_references--);
+       drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
        kfree(intel_fb);
 }
 
@@ -10463,12 +10849,12 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
        .create_handle = intel_user_framebuffer_create_handle,
 };
 
-int intel_framebuffer_init(struct drm_device *dev,
-                          struct intel_framebuffer *intel_fb,
-                          struct drm_mode_fb_cmd2 *mode_cmd,
-                          struct drm_i915_gem_object *obj)
+static int intel_framebuffer_init(struct drm_device *dev,
+                                 struct intel_framebuffer *intel_fb,
+                                 struct drm_mode_fb_cmd2 *mode_cmd,
+                                 struct drm_i915_gem_object *obj)
 {
-       int aligned_height, tile_height;
+       int aligned_height;
        int pitch_limit;
        int ret;
 
@@ -10562,9 +10948,8 @@ int intel_framebuffer_init(struct drm_device *dev,
        if (mode_cmd->offsets[0] != 0)
                return -EINVAL;
 
-       tile_height = IS_GEN2(dev) ? 16 : 8;
-       aligned_height = ALIGN(mode_cmd->height,
-                              obj->tiling_mode ? tile_height : 1);
+       aligned_height = intel_align_height(dev, mode_cmd->height,
+                                           obj->tiling_mode);
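+       /* e.g. a 900-line tiled fb rounds up to 904 lines with 8-row tiles
+        * (gen3+) or 912 with gen2's 16-row tiles; untiled fbs keep their
+        * height. */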
        /* FIXME drm helper for size checks (especially planar formats)? */
        if (obj->base.size < aligned_height * mode_cmd->pitches[0])
                return -EINVAL;
@@ -10624,6 +11009,7 @@ static void intel_init_display(struct drm_device *dev)
 
        if (HAS_DDI(dev)) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+               dev_priv->display.get_plane_config = ironlake_get_plane_config;
                dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
@@ -10631,6 +11017,7 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+               dev_priv->display.get_plane_config = ironlake_get_plane_config;
                dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -10638,6 +11025,7 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_plane_config = i9xx_get_plane_config;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -10645,6 +11033,7 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = i9xx_update_plane;
        } else {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_plane_config = i9xx_get_plane_config;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -10839,6 +11228,9 @@ static struct intel_quirk intel_quirks[] = {
 
        /* Acer Aspire 4736Z */
        { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+       /* Acer Aspire 5336 */
+       { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -10869,6 +11261,7 @@ static void i915_disable_vga(struct drm_device *dev)
        u8 sr1;
        u32 vga_reg = i915_vgacntrl_reg(dev);
 
+       /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
        outb(SR01, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
@@ -10901,7 +11294,9 @@ void intel_modeset_suspend_hw(struct drm_device *dev)
 void intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int i, j, ret;
+       int sprite, ret;
+       enum pipe pipe;
+       struct intel_crtc *crtc;
 
        drm_mode_config_init(dev);
 
@@ -10938,13 +11333,13 @@ void intel_modeset_init(struct drm_device *dev)
                      INTEL_INFO(dev)->num_pipes,
                      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
 
-       for_each_pipe(i) {
-               intel_crtc_init(dev, i);
-               for (j = 0; j < dev_priv->num_plane; j++) {
-                       ret = intel_plane_init(dev, i, j);
+       for_each_pipe(pipe) {
+               intel_crtc_init(dev, pipe);
+               for_each_sprite(pipe, sprite) {
+                       ret = intel_plane_init(dev, pipe, sprite);
                        if (ret)
                                DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
-                                             pipe_name(i), sprite_name(i, j), ret);
+                                             pipe_name(pipe), sprite_name(pipe, sprite), ret);
                }
        }
 
@@ -10960,6 +11355,33 @@ void intel_modeset_init(struct drm_device *dev)
 
        /* Just in case the BIOS is doing something questionable. */
        intel_disable_fbc(dev);
+
+       mutex_lock(&dev->mode_config.mutex);
+       intel_modeset_setup_hw_state(dev, false);
+       mutex_unlock(&dev->mode_config.mutex);
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               if (!crtc->active)
+                       continue;
+
+               /*
+                * Note that reserving the BIOS fb up front prevents us
+                * from stuffing other stolen allocations like the ring
+                * on top.  This prevents some ugliness at boot time, and
+                * can even allow for smooth boot transitions if the BIOS
+                * fb is large enough for the active pipe configuration.
+                */
+               if (dev_priv->display.get_plane_config) {
+                       dev_priv->display.get_plane_config(crtc,
+                                                          &crtc->plane_config);
+                       /*
+                        * If the fb is shared between multiple heads, we'll
+                        * just get the first one.
+                        */
+                       intel_find_plane_obj(crtc, &crtc->plane_config);
+               }
+       }
 }
 
 static void
@@ -11142,11 +11564,21 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
         * the crtc fixup. */
 }
 
-void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga_power_on(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 vga_reg = i915_vgacntrl_reg(dev);
 
+       if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
+               DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+               i915_disable_vga(dev);
+       }
+}
+
+void i915_redisable_vga(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
        /* This function can be called either from intel_modeset_setup_hw_state or
         * at a very early point in our resume sequence, where the power well
         * structures are not yet restored. Since this function is at a very
@@ -11154,14 +11586,10 @@ void i915_redisable_vga(struct drm_device *dev)
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses. */
-       if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
-           (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
+       if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
                return;
 
-       if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
-               DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
-               i915_disable_vga(dev);
-       }
+       i915_redisable_vga_power_on(dev);
 }
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -11265,9 +11693,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
         */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list,
                            base.head) {
-               if (crtc->active && i915_fastboot) {
-                       intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
-
+               if (crtc->active && i915.fastboot) {
+                       intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
                        DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
                                      crtc->base.base.id);
                        drm_mode_debug_printmodeline(&crtc->base.mode);
@@ -11324,14 +11751,40 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
+       struct drm_crtc *c;
+       struct intel_framebuffer *fb;
+
        intel_modeset_init_hw(dev);
 
        intel_setup_overlay(dev);
 
-       mutex_lock(&dev->mode_config.mutex);
-       drm_mode_config_reset(dev);
-       intel_modeset_setup_hw_state(dev, false);
-       mutex_unlock(&dev->mode_config.mutex);
+       /*
+        * Make sure any fbs we allocated at startup are properly
+        * pinned & fenced.  When we do the allocation it's too early
+        * for this.
+        */
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+               if (!c->fb)
+                       continue;
+
+               fb = to_intel_framebuffer(c->fb);
+               if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
+                       DRM_ERROR("failed to pin boot fb on pipe %d\n",
+                                 to_intel_crtc(c)->pipe);
+                       drm_framebuffer_unreference(c->fb);
+                       c->fb = NULL;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
+void intel_connector_unregister(struct intel_connector *intel_connector)
+{
+       struct drm_connector *connector = &intel_connector->base;
+
+       intel_panel_destroy_backlight(connector);
+       drm_sysfs_connector_remove(connector);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -11378,8 +11831,10 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        /* destroy the backlight and sysfs files before encoders/connectors */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               intel_panel_destroy_backlight(connector);
-               drm_sysfs_connector_remove(connector);
+               struct intel_connector *intel_connector;
+
+               intel_connector = to_intel_connector(connector);
+               intel_connector->unregister(intel_connector);
        }
 
        drm_mode_config_cleanup(dev);
@@ -11412,12 +11867,24 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
        unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
        u16 gmch_ctrl;
 
-       pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
+       if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
+               DRM_ERROR("failed to read control word\n");
+               return -EIO;
+       }
+
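+       /* !! and ! normalize both sides to 0/1: nothing to do when the
+        * VGA disable bit already matches the requested state. */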
+       if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
+               return 0;
+
        if (state)
                gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
        else
                gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-       pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
+
+       if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
+               DRM_ERROR("failed to write control word\n");
+               return -EIO;
+       }
+
        return 0;
 }
 
@@ -11489,7 +11956,8 @@ intel_display_capture_error_state(struct drm_device *dev)
 
        for_each_pipe(i) {
                error->pipe[i].power_domain_on =
-                       intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+                       intel_display_power_enabled_sw(dev_priv,
+                                                      POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;
 
@@ -11527,7 +11995,7 @@ intel_display_capture_error_state(struct drm_device *dev)
                enum transcoder cpu_transcoder = transcoders[i];
 
                error->transcoder[i].power_domain_on =
-                       intel_display_power_enabled_sw(dev,
+                       intel_display_power_enabled_sw(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;
index 57552eb386b0b1b6d735a7fb0f9b770405e5f411..d2b2f51f839fe949d39d845706be7081b884c722 100644 (file)
@@ -91,18 +91,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 }
 
 static void intel_dp_link_down(struct intel_dp *intel_dp);
+static void edp_panel_vdd_on(struct intel_dp *intel_dp);
+static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 
 static int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+       struct drm_device *dev = intel_dp->attached_connector->base.dev;
 
        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
        case DP_LINK_BW_2_7:
                break;
        case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
-               max_link_bw = DP_LINK_BW_2_7;
+               if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
+                   intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
+                       max_link_bw = DP_LINK_BW_5_4;
+               else
+                       max_link_bw = DP_LINK_BW_2_7;
                break;
        default:
                WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@ -294,7 +301,7 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
 }
 
-static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -302,7 +309,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
 }
 
-static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -319,7 +326,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
        if (!is_edp(intel_dp))
                return;
 
-       if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
+       if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
@@ -351,31 +358,46 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
        return status;
 }
 
-static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
-                                     int index)
+static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       /* The clock divider is based off the hrawclk,
-        * and would like to run at 2MHz. So, take the
-        * hrawclk value and divide by 2 and use that
-        *
-        * Note that PCH attached eDP panels should use a 125MHz input
-        * clock divider.
+       /*
+        * The clock divider is based off the hrawclk, and would like to run at
+        * 2MHz.  So, take the hrawclk value and divide by 2 and use that
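+        * (e.g. an illustrative 200 MHz hrawclk yields a divider of 100 and
+        * hence the desired ~2 MHz AUX clock).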
         */
-       if (IS_VALLEYVIEW(dev)) {
-               return index ? 0 : 100;
-       } else if (intel_dig_port->port == PORT_A) {
-               if (index)
-                       return 0;
-               if (HAS_DDI(dev))
-                       return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
-               else if (IS_GEN6(dev) || IS_GEN7(dev))
+       return index ? 0 : intel_hrawclk(dev) / 2;
+}
+
+static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+
+       if (index)
+               return 0;
+
+       if (intel_dig_port->port == PORT_A) {
+               if (IS_GEN6(dev) || IS_GEN7(dev))
                        return 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
                        return 225; /* eDP input clock at 450Mhz */
+       } else {
+               return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+       }
+}
+
+static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (intel_dig_port->port == PORT_A) {
+               if (index)
+                       return 0;
+               return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
@@ -383,13 +405,46 @@ static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
                case 1: return 72;
                default: return 0;
                }
-       } else if (HAS_PCH_SPLIT(dev)) {
+       } else  {
                return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
-       } else {
-               return index ? 0 :intel_hrawclk(dev) / 2;
        }
 }
 
+static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+       return index ? 0 : 100;
+}
+
+static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
+                                     bool has_aux_irq,
+                                     int send_bytes,
+                                     uint32_t aux_clock_divider)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       uint32_t precharge, timeout;
+
+       if (IS_GEN6(dev))
+               precharge = 3;
+       else
+               precharge = 5;
+
+       if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
+               timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+       else
+               timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+       return DP_AUX_CH_CTL_SEND_BUSY |
+              DP_AUX_CH_CTL_DONE |
+              (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+              DP_AUX_CH_CTL_TIME_OUT_ERROR |
+              timeout |
+              DP_AUX_CH_CTL_RECEIVE_ERROR |
+              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+              (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+              (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
+
 static int
 intel_dp_aux_ch(struct intel_dp *intel_dp,
                uint8_t *send, int send_bytes,
@@ -403,9 +458,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
-       int try, precharge, clock = 0;
+       int try, clock = 0;
        bool has_aux_irq = HAS_AUX_IRQ(dev);
-       uint32_t timeout;
 
        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
@@ -415,16 +469,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 
        intel_dp_check_edp(intel_dp);
 
-       if (IS_GEN6(dev))
-               precharge = 3;
-       else
-               precharge = 5;
-
-       if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
-               timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
-       else
-               timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
-
        intel_aux_display_runtime_get(dev_priv);
 
        /* Try to wait for any previous AUX channel activity */
@@ -448,7 +492,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                goto out;
        }
 
-       while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
+       while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+               u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+                                                         has_aux_irq,
+                                                         send_bytes,
+                                                         aux_clock_divider);
+
                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
@@ -457,16 +506,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                                           pack_aux(send + i, send_bytes - i));
 
                        /* Send the command and wait for it to complete */
-                       I915_WRITE(ch_ctl,
-                                  DP_AUX_CH_CTL_SEND_BUSY |
-                                  (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
-                                  timeout |
-                                  (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-                                  (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
-                                  (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
-                                  DP_AUX_CH_CTL_DONE |
-                                  DP_AUX_CH_CTL_TIME_OUT_ERROR |
-                                  DP_AUX_CH_CTL_RECEIVE_ERROR);
+                       I915_WRITE(ch_ctl, send_ctl);
 
                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
 
@@ -637,7 +677,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
        int reply_bytes;
        int ret;
 
-       ironlake_edp_panel_vdd_on(intel_dp);
+       edp_panel_vdd_on(intel_dp);
        intel_dp_check_edp(intel_dp);
        /* Set up the command byte */
        if (mode & MODE_I2C_READ)
@@ -740,10 +780,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
        ret = -EREMOTEIO;
 
 out:
-       ironlake_edp_panel_vdd_off(intel_dp, false);
+       edp_panel_vdd_off(intel_dp, false);
        return ret;
 }
 
+static void
+intel_dp_connector_unregister(struct intel_connector *intel_connector)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
+
+       sysfs_remove_link(&intel_connector->base.kdev->kobj,
+                         intel_dp->adapter.dev.kobj.name);
+       intel_connector_unregister(intel_connector);
+}
+
 static int
 intel_dp_i2c_init(struct intel_dp *intel_dp,
                  struct intel_connector *intel_connector, const char *name)
@@ -761,9 +811,19 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
        strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
        intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
        intel_dp->adapter.algo_data = &intel_dp->algo;
-       intel_dp->adapter.dev.parent = intel_connector->base.kdev;
+       intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;
 
        ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+       if (ret < 0)
+               return ret;
+
+       ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
+                               &intel_dp->adapter.dev.kobj,
+                               intel_dp->adapter.dev.kobj.name);
+
+       if (ret < 0)
+               i2c_del_adapter(&intel_dp->adapter);
+
        return ret;
 }
 
@@ -812,9 +872,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
        int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
-       int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+       /* Conveniently, the link BW constants become indices with a shift...*/
+       int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
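+       /* e.g. DP_LINK_BW_1_62 (0x06) >> 3 == 0, DP_LINK_BW_2_7 (0x0a) >> 3
+        * == 1, DP_LINK_BW_5_4 (0x14) >> 3 == 2, indexing bws[] below. */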
        int bpp, mode_rate;
-       static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+       static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
        int link_avail, link_clock;
 
        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
@@ -855,8 +916,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);
 
-               for (clock = 0; clock <= max_clock; clock++) {
-                       for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
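+               /* Walking lanes in the outer loop prefers the smallest lane
+                * count that can carry the mode, even at a faster link rate. */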
+               for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+                       for (clock = 0; clock <= max_clock; clock++) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);
@@ -1015,16 +1076,16 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
                ironlake_set_pll_cpu_edp(intel_dp);
 }
 
-#define IDLE_ON_MASK           (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE          (PP_ON | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
+#define IDLE_ON_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE          (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
 
-#define IDLE_OFF_MASK          (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
-#define IDLE_OFF_VALUE         (0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+#define IDLE_OFF_MASK          (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
+#define IDLE_OFF_VALUE         (0     | PP_SEQUENCE_NONE | 0                     | 0)
 
-#define IDLE_CYCLE_MASK                (PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
-#define IDLE_CYCLE_VALUE       (0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+#define IDLE_CYCLE_MASK                (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE       (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
 
-static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
 {
@@ -1049,24 +1110,41 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
        DRM_DEBUG_KMS("Wait complete\n");
 }
 
-static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+static void wait_panel_on(struct intel_dp *intel_dp)
 {
        DRM_DEBUG_KMS("Wait for panel power on\n");
-       ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+       wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 }
 
-static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+static void wait_panel_off(struct intel_dp *intel_dp)
 {
        DRM_DEBUG_KMS("Wait for panel power off time\n");
-       ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+       wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
 }
 
-static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
 {
        DRM_DEBUG_KMS("Wait for panel power cycle\n");
-       ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+
+       /* When we disable the VDD override bit last we have to do the manual
+        * wait. */
+       wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
+                                      intel_dp->panel_power_cycle_delay);
+
+       wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
 }
 
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
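+       /* e.g. with an illustrative 200 ms backlight_on_delay, if the panel
+        * was powered on 20 ms ago this waits only the remaining ~180 ms. */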
+       wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
+                                      intel_dp->backlight_on_delay);
+}
+
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+       wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
+                                      intel_dp->backlight_off_delay);
+}
 
 /* Read the current pp_control value, unlocking the register if it
  * is locked
@@ -1084,7 +1162,7 @@ static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
        return control;
 }
 
-void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+static void edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1099,15 +1177,15 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 
        intel_dp->want_panel_vdd = true;
 
-       if (ironlake_edp_have_panel_vdd(intel_dp))
+       if (edp_have_panel_vdd(intel_dp))
                return;
 
        intel_runtime_pm_get(dev_priv);
 
        DRM_DEBUG_KMS("Turning eDP VDD on\n");
 
-       if (!ironlake_edp_have_panel_power(intel_dp))
-               ironlake_wait_panel_power_cycle(intel_dp);
+       if (!edp_have_panel_power(intel_dp))
+               wait_panel_power_cycle(intel_dp);
 
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;
@@ -1122,13 +1200,13 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
-       if (!ironlake_edp_have_panel_power(intel_dp)) {
+       if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP was not running\n");
                msleep(intel_dp->panel_power_up_delay);
        }
 }
 
-static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
+static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1137,7 +1215,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
 
        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
-       if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+       if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
                DRM_DEBUG_KMS("Turning eDP VDD off\n");
 
                pp = ironlake_get_pp_control(intel_dp);
@@ -1154,24 +1232,24 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
                I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
 
                if ((pp & POWER_TARGET_ON) == 0)
-                       msleep(intel_dp->panel_power_cycle_delay);
+                       intel_dp->last_power_cycle = jiffies;
 
                intel_runtime_pm_put(dev_priv);
        }
 }
 
-static void ironlake_panel_vdd_work(struct work_struct *__work)
+static void edp_panel_vdd_work(struct work_struct *__work)
 {
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
        mutex_lock(&dev->mode_config.mutex);
-       ironlake_panel_vdd_off_sync(intel_dp);
+       edp_panel_vdd_off_sync(intel_dp);
        mutex_unlock(&dev->mode_config.mutex);
 }
 
-void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 {
        if (!is_edp(intel_dp))
                return;
@@ -1181,7 +1259,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
        intel_dp->want_panel_vdd = false;
 
        if (sync) {
-               ironlake_panel_vdd_off_sync(intel_dp);
+               edp_panel_vdd_off_sync(intel_dp);
        } else {
                /*
                 * Queue the timer to fire a long
@@ -1193,7 +1271,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
        }
 }
 
-void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+void intel_edp_panel_on(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1205,12 +1283,12 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 
        DRM_DEBUG_KMS("Turn eDP power on\n");
 
-       if (ironlake_edp_have_panel_power(intel_dp)) {
+       if (edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP power already on\n");
                return;
        }
 
-       ironlake_wait_panel_power_cycle(intel_dp);
+       wait_panel_power_cycle(intel_dp);
 
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
@@ -1228,7 +1306,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
 
-       ironlake_wait_panel_on(intel_dp);
+       wait_panel_on(intel_dp);
+       intel_dp->last_power_on = jiffies;
 
        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1237,7 +1316,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
        }
 }
 
-void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+void intel_edp_panel_off(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1249,20 +1328,24 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
        DRM_DEBUG_KMS("Turn eDP power off\n");
 
+       edp_wait_backlight_off(intel_dp);
+
        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
-       pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+       pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+               EDP_BLC_ENABLE);
 
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
 
-       ironlake_wait_panel_off(intel_dp);
+       intel_dp->last_power_cycle = jiffies;
+       wait_panel_off(intel_dp);
 }
 
-void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+void intel_edp_backlight_on(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1280,7 +1363,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
-       msleep(intel_dp->backlight_on_delay);
+       wait_backlight_on(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;
 
@@ -1292,7 +1375,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
        intel_panel_enable_backlight(intel_dp->attached_connector);
 }
 
-void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+void intel_edp_backlight_off(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1312,7 +1395,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
-       msleep(intel_dp->backlight_off_delay);
+       intel_dp->last_backlight_off = jiffies;
 }
 
 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1403,7 +1486,14 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 tmp = I915_READ(intel_dp->output_reg);
+       enum intel_display_power_domain power_domain;
+       u32 tmp;
+
+       power_domain = intel_display_port_power_domain(encoder);
+       if (!intel_display_power_enabled(dev_priv, power_domain))
+               return false;
+
+       tmp = I915_READ(intel_dp->output_reg);
 
        if (!(tmp & DP_PORT_EN))
                return false;
@@ -1597,10 +1687,12 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
+       uint32_t aux_clock_divider;
        int precharge = 0x3;
        int msg_size = 5;       /* Header(4) + Message(1) */
 
+       aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
        /* Enable PSR in sink */
        if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
                intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
@@ -1668,7 +1760,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
                return false;
        }
 
-       if (!i915_enable_psr) {
+       if (!i915.enable_psr) {
                DRM_DEBUG_KMS("PSR disable by flag\n");
                return false;
        }
@@ -1784,9 +1876,11 @@ static void intel_disable_dp(struct intel_encoder *encoder)
 
        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
-       ironlake_edp_backlight_off(intel_dp);
+       edp_panel_vdd_on(intel_dp);
+       intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
-       ironlake_edp_panel_off(intel_dp);
+       intel_edp_panel_off(intel_dp);
+       edp_panel_vdd_off(intel_dp, true);
 
        /* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
        if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
@@ -1816,11 +1910,11 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;
 
-       ironlake_edp_panel_vdd_on(intel_dp);
+       edp_panel_vdd_on(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
-       ironlake_edp_panel_on(intel_dp);
-       ironlake_edp_panel_vdd_off(intel_dp, true);
+       intel_edp_panel_on(intel_dp);
+       edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
 }
@@ -1830,14 +1924,14 @@ static void g4x_enable_dp(struct intel_encoder *encoder)
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
        intel_enable_dp(encoder);
-       ironlake_edp_backlight_on(intel_dp);
+       intel_edp_backlight_on(intel_dp);
 }
 
 static void vlv_enable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-       ironlake_edp_backlight_on(intel_dp);
+       intel_edp_backlight_on(intel_dp);
 }
 
 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@ -2630,10 +2724,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
        bool channel_eq = false;
        int tries, cr_tries;
        uint32_t DP = intel_dp->DP;
+       uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+
+       /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
+       if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
+               training_pattern = DP_TRAINING_PATTERN_3;
 
        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp, &DP,
-                                    DP_TRAINING_PATTERN_2 |
+                                    training_pattern |
                                     DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to start channel equalization\n");
                return;
@@ -2660,7 +2759,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
-                                               DP_TRAINING_PATTERN_2 |
+                                               training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        cr_tries++;
                        continue;
@@ -2676,7 +2775,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        intel_dp_link_down(intel_dp);
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
-                                               DP_TRAINING_PATTERN_2 |
+                                               training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        tries = 0;
                        cr_tries++;
@@ -2818,6 +2917,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                }
        }
 
+       /* Training Pattern 3 support */
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
+           intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
+               intel_dp->use_tps3 = true;
+               DRM_DEBUG_KMS("Displayport TPS3 supported");
+       } else
+               intel_dp->use_tps3 = false;
+
        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return true; /* native DP sink */
@@ -2841,7 +2948,7 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
        if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
                return;
 
-       ironlake_edp_panel_vdd_on(intel_dp);
+       edp_panel_vdd_on(intel_dp);
 
        if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
@@ -2851,7 +2958,36 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
 
-       ironlake_edp_panel_vdd_off(intel_dp, false);
+       edp_panel_vdd_off(intel_dp, false);
+}
+
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(intel_dig_port->base.base.crtc);
+       u8 buf[1];
+
+       if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1))
+               return -EAGAIN;
+
+       if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
+               return -ENOTTY;
+
+       if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK,
+                                        DP_TEST_SINK_START))
+               return -EAGAIN;
+
+       /* Wait 2 vblanks to be sure we will have the correct CRC value */
+       intel_wait_for_vblank(dev, intel_crtc->pipe);
+       intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+       if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6))
+               return -EAGAIN;
+
+       intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0);
+       return 0;
 }
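The new intel_dp_sink_crc() helper gives test code a way to read back the CRC the sink computed over the frames it received. A minimal consumer sketch, assuming a connected DP sink; the wrapper below is illustrative only, just intel_dp_sink_crc() comes from this change:

/*
 * Illustrative consumer of intel_dp_sink_crc(): fetch the six CRC
 * bytes (two per colour component) and log them.
 */
static int example_log_sink_crc(struct intel_dp *intel_dp)
{
        u8 crc[6];
        int ret;

        ret = intel_dp_sink_crc(intel_dp, crc);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("sink CRC: %02x%02x%02x%02x%02x%02x\n",
                      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
        return 0;
}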
 
 static bool
@@ -3098,10 +3234,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum drm_connector_status status;
+       enum intel_display_power_domain power_domain;
        struct edid *edid = NULL;
 
        intel_runtime_pm_get(dev_priv);
 
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, drm_get_connector_name(connector));
 
@@ -3132,21 +3272,32 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        status = connector_status_connected;
 
 out:
+       intel_display_power_put(dev_priv, power_domain);
+
        intel_runtime_pm_put(dev_priv);
+
        return status;
 }
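The detect() and get_modes() hunks throughout this merge all follow the same shape. Distilled into a sketch, with an illustrative function name:

/*
 * Recurring pattern in this series: hold a reference on the port's
 * power domain for the duration of any hardware access from a
 * detect/get_modes path, so the power well is guaranteed to be up
 * and register reads don't return garbage.
 */
static void example_with_port_power(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        enum intel_display_power_domain power_domain;

        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        /* ... DDC/AUX transfers and register reads go here ... */

        intel_display_power_put(dev_priv, power_domain);
}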
 
 static int intel_dp_get_modes(struct drm_connector *connector)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum intel_display_power_domain power_domain;
        int ret;
 
        /* We should parse the EDID data and find out if it has an audio sink
         */
 
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
        ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
+       intel_display_power_put(dev_priv, power_domain);
        if (ret)
                return ret;
 
@@ -3167,15 +3318,25 @@ static bool
 intel_dp_detect_audio(struct drm_connector *connector)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum intel_display_power_domain power_domain;
        struct edid *edid;
        bool has_audio = false;
 
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
        edid = intel_dp_get_edid(connector, &intel_dp->adapter);
        if (edid) {
                has_audio = drm_detect_monitor_audio(edid);
                kfree(edid);
        }
 
+       intel_display_power_put(dev_priv, power_domain);
+
        return has_audio;
 }
 
@@ -3295,7 +3456,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                mutex_lock(&dev->mode_config.mutex);
-               ironlake_panel_vdd_off_sync(intel_dp);
+               edp_panel_vdd_off_sync(intel_dp);
                mutex_unlock(&dev->mode_config.mutex);
        }
        kfree(intel_dig_port);
@@ -3394,6 +3555,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
        }
 }
 
+static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
+{
+       intel_dp->last_power_cycle = jiffies;
+       intel_dp->last_power_on = jiffies;
+       intel_dp->last_backlight_off = jiffies;
+}
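These timestamps let the panel power code sleep only for the part of a mandated delay that has not already elapsed. A sketch of the jiffies arithmetic, with an assumed helper name; the real wait helpers live elsewhere in intel_dp.c:

/*
 * Wait until at least delay_ms has passed since last_event, sleeping
 * only for the remainder if some of the delay has already elapsed.
 */
static void example_wait_panel_delay(unsigned long last_event, int delay_ms)
{
        unsigned long target = last_event + msecs_to_jiffies(delay_ms);

        if (time_before(jiffies, target))
                msleep(jiffies_to_msecs(target - jiffies));
}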
+
 static void
 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp,
@@ -3516,10 +3684,17 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }
 
-       /* And finally store the new values in the power sequencer. */
+       /*
+        * And finally store the new values in the power sequencer. The
+        * backlight delays are set to 1 because we do manual waits on them. For
+        * T8, even BSpec recommends doing it. For T9, if we don't do this,
+        * we'll end up waiting for the backlight off delay twice: once when we
+        * do the manual sleep, and once when we disable the panel and wait for
+        * the PP_STATUS bit to become zero.
+        */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
-               (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
-       pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+               (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
+       pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
@@ -3554,14 +3729,14 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 }
 
 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
-                                    struct intel_connector *intel_connector)
+                                    struct intel_connector *intel_connector,
+                                    struct edp_power_seq *power_seq)
 {
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
-       struct edp_power_seq power_seq = { 0 };
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
@@ -3569,12 +3744,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (!is_edp(intel_dp))
                return true;
 
-       intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-
        /* Cache DPCD and EDID for edp. */
-       ironlake_edp_panel_vdd_on(intel_dp);
+       edp_panel_vdd_on(intel_dp);
        has_dpcd = intel_dp_get_dpcd(intel_dp);
-       ironlake_edp_panel_vdd_off(intel_dp, false);
+       edp_panel_vdd_off(intel_dp, false);
 
        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -3588,8 +3761,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        }
 
        /* We now know it's not a ghost, init power sequence regs. */
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-                                                     &power_seq);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
 
        edid = drm_get_edid(connector, &intel_dp->adapter);
        if (edid) {
@@ -3622,7 +3794,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
        }
 
-       intel_panel_init(&intel_connector->panel, fixed_mode);
+       intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
        intel_panel_setup_backlight(connector);
 
        return true;
@@ -3638,9 +3810,22 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
+       struct edp_power_seq power_seq = { 0 };
        const char *name = NULL;
        int type, error;
 
+       /* intel_dp vfuncs */
+       if (IS_VALLEYVIEW(dev))
+               intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
+       else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+       else if (HAS_PCH_SPLIT(dev))
+               intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+       else
+               intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
+
+       intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+
        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;
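The vfuncs assigned above split the platform-specific AUX clock and control programming out of the common transfer loop. How they compose, as a hedged sketch; the function below is illustrative, not from the sources:

/*
 * Pick a clock divider, build the AUX_CTL value for a send_bytes-long
 * transaction, and kick off the transfer by writing the control reg.
 */
static void example_start_aux_xfer(struct intel_dp *intel_dp, int send_bytes)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
                dig_port->base.base.dev->dev_private;
        uint32_t divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
        uint32_t ctl = intel_dp->get_aux_send_ctl(intel_dp, true,
                                                  send_bytes, divider);

        I915_WRITE(intel_dp->aux_ch_ctl_reg, ctl);
}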
@@ -3669,7 +3854,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        connector->doublescan_allowed = 0;
 
        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-                         ironlake_panel_vdd_work);
+                         edp_panel_vdd_work);
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_sysfs_connector_add(connector);
@@ -3678,6 +3863,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
+       intel_connector->unregister = intel_dp_connector_unregister;
 
        intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
        if (HAS_DDI(dev)) {
@@ -3721,18 +3907,23 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                BUG();
        }
 
+       if (is_edp(intel_dp)) {
+               intel_dp_init_panel_power_timestamps(intel_dp);
+               intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+       }
+
        error = intel_dp_i2c_init(intel_dp, intel_connector, name);
        WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
             error, port_name(port));
 
        intel_dp->psr_setup_done = false;
 
-       if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+       if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
                i2c_del_adapter(&intel_dp->adapter);
                if (is_edp(intel_dp)) {
                        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                        mutex_lock(&dev->mode_config.mutex);
-                       ironlake_panel_vdd_off_sync(intel_dp);
+                       edp_panel_vdd_off_sync(intel_dp);
                        mutex_unlock(&dev->mode_config.mutex);
                }
                drm_sysfs_connector_remove(connector);
@@ -3798,7 +3989,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 
        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-       intel_encoder->cloneable = false;
+       intel_encoder->cloneable = 0;
        intel_encoder->hot_plug = intel_dp_hot_plug;
 
        if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
index fbfaaba5cc3b2a079ae9749401d5602c6674cf07..2546cae0b4f03dc7dfae4e5a0d5d19f92dd8b6b9 100644 (file)
@@ -110,9 +110,10 @@ struct intel_framebuffer {
 
 struct intel_fbdev {
        struct drm_fb_helper helper;
-       struct intel_framebuffer ifb;
+       struct intel_framebuffer *fb;
        struct list_head fbdev_list;
        struct drm_display_mode *our_mode;
+       int preferred_bpp;
 };
 
 struct intel_encoder {
@@ -124,11 +125,7 @@ struct intel_encoder {
        struct intel_crtc *new_crtc;
 
        int type;
-       /*
-        * Intel hw has only one MUX where encoders could be clone, hence a
-        * simple flag is enough to compute the possible_clones mask.
-        */
-       bool cloneable;
+       unsigned int cloneable;
        bool connectors_active;
        void (*hot_plug)(struct intel_encoder *);
        bool (*compute_config)(struct intel_encoder *,
@@ -187,6 +184,14 @@ struct intel_connector {
         * and active (i.e. dpms ON state). */
        bool (*get_hw_state)(struct intel_connector *);
 
+       /*
+        * Removes all interfaces through which the connector is accessible
+        * (like sysfs and debugfs entries), so that no new operations can
+        * be started on the connector. Also makes sure all currently
+        * pending operations finish before returning.
+        */
+       void (*unregister)(struct intel_connector *);
+
        /* Panel info for eDP and LVDS */
        struct intel_panel panel;
 
@@ -210,6 +215,12 @@ typedef struct dpll {
        int     p;
 } intel_clock_t;
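The ->unregister() hook added to struct intel_connector above gets wired up per connector later in this merge (intel_dp_connector_unregister, intel_connector_unregister). A minimal implementation sketch, assuming the common helper mirrors the sysfs teardown visible elsewhere in this diff:

static void example_connector_unregister(struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;

        /* Drop the sysfs entry so no new operations can reach us. */
        drm_sysfs_connector_remove(connector);
}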
 
+struct intel_plane_config {
+       bool tiled;
+       int size;
+       u32 base;
+};
+
 struct intel_crtc_config {
        /**
         * quirks - bitfield with hw state readout quirks
@@ -358,7 +369,10 @@ struct intel_crtc {
        int16_t cursor_width, cursor_height;
        bool cursor_visible;
 
+       struct intel_plane_config plane_config;
        struct intel_crtc_config config;
+       struct intel_crtc_config *new_config;
+       bool new_enabled;
 
        uint32_t ddi_pll_sel;
 
@@ -485,8 +499,22 @@ struct intel_dp {
        int backlight_off_delay;
        struct delayed_work panel_vdd_work;
        bool want_panel_vdd;
+       unsigned long last_power_cycle;
+       unsigned long last_power_on;
+       unsigned long last_backlight_off;
        bool psr_setup_done;
+       bool use_tps3;
        struct intel_connector *attached_connector;
+
+       uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
+       /*
+        * This function returns the value we have to program the AUX_CTL
+        * register with to kick off an AUX transaction.
+        */
+       uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
+                                    bool has_aux_irq,
+                                    int send_bytes,
+                                    uint32_t aux_clock_divider);
 };
 
 struct intel_digital_port {
@@ -540,6 +568,7 @@ struct intel_unpin_work {
 struct intel_set_config {
        struct drm_encoder **save_connector_encoders;
        struct drm_crtc **save_encoder_crtcs;
+       bool *save_crtc_enabled;
 
        bool fb_changed;
        bool mode_changed;
@@ -584,6 +613,8 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
 /* i915_irq.c */
 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                           enum pipe pipe, bool enable);
+bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+                                            enum pipe pipe, bool enable);
 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                           enum transcoder pch_transcoder,
                                           bool enable);
@@ -664,11 +695,10 @@ int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                               struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *pipelined);
 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
-int intel_framebuffer_init(struct drm_device *dev,
-                          struct intel_framebuffer *ifb,
+struct drm_framebuffer *
+__intel_framebuffer_create(struct drm_device *dev,
                           struct drm_mode_fb_cmd2 *mode_cmd,
                           struct drm_i915_gem_object *obj);
-void intel_framebuffer_fini(struct intel_framebuffer *fb);
 void intel_prepare_page_flip(struct drm_device *dev, int plane);
 void intel_finish_page_flip(struct drm_device *dev, int pipe);
 void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
@@ -708,8 +738,13 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
 bool intel_crtc_active(struct drm_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
-void intel_display_set_init_power(struct drm_device *dev, bool enable);
+void intel_display_set_init_power(struct drm_i915_private *dev_priv, bool enable);
+enum intel_display_power_domain
+intel_display_port_power_domain(struct intel_encoder *intel_encoder);
 int valleyview_get_vco(struct drm_i915_private *dev_priv);
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+                                struct intel_crtc_config *pipe_config);
+int intel_format_to_fourcc(int format);
 
 /* intel_dp.c */
 void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -721,15 +756,14 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 void intel_dp_check_link_status(struct intel_dp *intel_dp);
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
                             struct intel_crtc_config *pipe_config);
 bool intel_dp_is_edp(struct drm_device *dev, enum port port);
-void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
-void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
-void ironlake_edp_panel_on(struct intel_dp *intel_dp);
-void ironlake_edp_panel_off(struct intel_dp *intel_dp);
-void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+void intel_edp_backlight_on(struct intel_dp *intel_dp);
+void intel_edp_backlight_off(struct intel_dp *intel_dp);
+void intel_edp_panel_on(struct intel_dp *intel_dp);
+void intel_edp_panel_off(struct intel_dp *intel_dp);
 void intel_edp_psr_enable(struct intel_dp *intel_dp);
 void intel_edp_psr_disable(struct intel_dp *intel_dp);
 void intel_edp_psr_update(struct drm_device *dev);
@@ -808,7 +842,8 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
 
 /* intel_panel.c */
 int intel_panel_init(struct intel_panel *panel,
-                    struct drm_display_mode *fixed_mode);
+                    struct drm_display_mode *fixed_mode,
+                    struct drm_display_mode *downclock_mode);
 void intel_panel_fini(struct intel_panel *panel);
 void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
                            struct drm_display_mode *adjusted_mode);
@@ -845,18 +880,17 @@ bool intel_fbc_enabled(struct drm_device *dev);
 void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
-int intel_power_domains_init(struct drm_device *dev);
-void intel_power_domains_remove(struct drm_device *dev);
-bool intel_display_power_enabled(struct drm_device *dev,
+int intel_power_domains_init(struct drm_i915_private *dev_priv);
+void intel_power_domains_remove(struct drm_i915_private *dev_priv);
+bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain);
-bool intel_display_power_enabled_sw(struct drm_device *dev,
+bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
                                    enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_device *dev,
+void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain);
-void intel_display_power_put(struct drm_device *dev,
+void intel_display_power_put(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain);
-void intel_power_domains_init_hw(struct drm_device *dev);
-void intel_set_power_well(struct drm_device *dev, bool enable);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
 void intel_enable_gt_powersave(struct drm_device *dev);
 void intel_disable_gt_powersave(struct drm_device *dev);
 void ironlake_teardown_rc6(struct drm_device *dev);
index fabbf0d895cf2a5e133d804d5e8e05f0f26f79ad..33656647f8bcf566ea8fba54f898ec6fc71b2b5b 100644 (file)
@@ -243,11 +243,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       enum intel_display_power_domain power_domain;
        u32 port, func;
        enum pipe p;
 
        DRM_DEBUG_KMS("\n");
 
+       power_domain = intel_display_port_power_domain(encoder);
+       if (!intel_display_power_enabled(dev_priv, power_domain))
+               return false;
+
        /* XXX: this only works for one DSI output */
        for (p = PIPE_A; p <= PIPE_B; p++) {
                port = I915_READ(MIPI_PORT_CTRL(p));
@@ -488,8 +493,19 @@ static enum drm_connector_status
 intel_dsi_detect(struct drm_connector *connector, bool force)
 {
        struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+       struct intel_encoder *intel_encoder = &intel_dsi->base;
+       enum intel_display_power_domain power_domain;
+       enum drm_connector_status connector_status;
+       struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
+
        DRM_DEBUG_KMS("\n");
-       return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+       power_domain = intel_display_port_power_domain(intel_encoder);
+
+       intel_display_power_get(dev_priv, power_domain);
+       connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+       intel_display_power_put(dev_priv, power_domain);
+
+       return connector_status;
 }
 
 static int intel_dsi_get_modes(struct drm_connector *connector)
@@ -586,6 +602,7 @@ bool intel_dsi_init(struct drm_device *dev)
        intel_encoder->get_config = intel_dsi_get_config;
 
        intel_connector->get_hw_state = intel_connector_get_hw_state;
+       intel_connector->unregister = intel_connector_unregister;
 
        for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
                dsi = &intel_dsi_devices[i];
@@ -603,7 +620,7 @@ bool intel_dsi_init(struct drm_device *dev)
        intel_encoder->type = INTEL_OUTPUT_DSI;
        intel_encoder->crtc_mask = (1 << 0); /* XXX */
 
-       intel_encoder->cloneable = false;
+       intel_encoder->cloneable = 0;
        drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
                           DRM_MODE_CONNECTOR_DSI);
 
@@ -624,7 +641,7 @@ bool intel_dsi_init(struct drm_device *dev)
        }
 
        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
-       intel_panel_init(&intel_connector->panel, fixed_mode);
+       intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
 
        return true;
 
index eeff998e52efdea93534ffb680582503f9f2a56c..7fe3feedfe039cda453851f1338fd56b100945cb 100644 (file)
@@ -477,6 +477,7 @@ void intel_dvo_init(struct drm_device *dev)
        intel_encoder->compute_config = intel_dvo_compute_config;
        intel_encoder->mode_set = intel_dvo_mode_set;
        intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+       intel_connector->unregister = intel_connector_unregister;
 
        /* Now, try to find a controller */
        for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
@@ -521,14 +522,15 @@ void intel_dvo_init(struct drm_device *dev)
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
                switch (dvo->type) {
                case INTEL_DVO_CHIP_TMDS:
-                       intel_encoder->cloneable = true;
+                       intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
+                               (1 << INTEL_OUTPUT_DVO);
                        drm_connector_init(dev, connector,
                                           &intel_dvo_connector_funcs,
                                           DRM_MODE_CONNECTOR_DVII);
                        encoder_type = DRM_MODE_ENCODER_TMDS;
                        break;
                case INTEL_DVO_CHIP_LVDS:
-                       intel_encoder->cloneable = false;
+                       intel_encoder->cloneable = 0;
                        drm_connector_init(dev, connector,
                                           &intel_dvo_connector_funcs,
                                           DRM_MODE_CONNECTOR_LVDS);
index 39eac9937a4aa1a89c176ef77d727cfb615b8cd1..d6d78c86c232b006906d5e7f4dbc9aabc943afb6 100644 (file)
@@ -62,6 +62,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 {
        struct intel_fbdev *ifbdev =
                container_of(helper, struct intel_fbdev, helper);
+       struct drm_framebuffer *fb;
        struct drm_device *dev = helper->dev;
        struct drm_mode_fb_cmd2 mode_cmd = {};
        struct drm_i915_gem_object *obj;
@@ -93,18 +94,22 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
        /* Flush everything out, we'll be doing GTT only from now on */
        ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
        if (ret) {
-               DRM_ERROR("failed to pin fb: %d\n", ret);
+               DRM_ERROR("failed to pin obj: %d\n", ret);
                goto out_unref;
        }
 
-       ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
-       if (ret)
+       fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
+       if (IS_ERR(fb)) {
+               ret = PTR_ERR(fb);
                goto out_unpin;
+       }
+
+       ifbdev->fb = to_intel_framebuffer(fb);
 
        return 0;
 
 out_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
 out_unref:
        drm_gem_object_unreference(&obj->base);
 out:
@@ -116,23 +121,26 @@ static int intelfb_create(struct drm_fb_helper *helper,
 {
        struct intel_fbdev *ifbdev =
                container_of(helper, struct intel_fbdev, helper);
-       struct intel_framebuffer *intel_fb = &ifbdev->ifb;
+       struct intel_framebuffer *intel_fb = ifbdev->fb;
        struct drm_device *dev = helper->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct fb_info *info;
        struct drm_framebuffer *fb;
        struct drm_i915_gem_object *obj;
        int size, ret;
+       bool prealloc = false;
 
        mutex_lock(&dev->struct_mutex);
 
-       if (!intel_fb->obj) {
+       if (!intel_fb || WARN_ON(!intel_fb->obj)) {
                DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
                ret = intelfb_alloc(helper, sizes);
                if (ret)
                        goto out_unlock;
+               intel_fb = ifbdev->fb;
        } else {
                DRM_DEBUG_KMS("re-using BIOS fb\n");
+               prealloc = true;
                sizes->fb_width = intel_fb->base.width;
                sizes->fb_height = intel_fb->base.height;
        }
@@ -148,7 +156,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        info->par = helper;
 
-       fb = &ifbdev->ifb.base;
+       fb = &ifbdev->fb->base;
 
        ifbdev->helper.fb = fb;
        ifbdev->helper.fbdev = info;
@@ -194,7 +202,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
         * If the object is stolen however, it will be full of whatever
         * garbage was left in there.
         */
-       if (ifbdev->ifb.obj->stolen)
+       if (ifbdev->fb->obj->stolen && !prealloc)
                memset_io(info->screen_base, 0, info->screen_size);
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -208,7 +216,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
        return 0;
 
 out_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
        drm_gem_object_unreference(&obj->base);
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
@@ -236,7 +244,193 @@ static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
        *blue = intel_crtc->lut_b[regno] << 8;
 }
 
+static struct drm_fb_helper_crtc *
+intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
+{
+       int i;
+
+       for (i = 0; i < fb_helper->crtc_count; i++)
+               if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
+                       return &fb_helper->crtc_info[i];
+
+       return NULL;
+}
+
+/*
+ * Try to read the BIOS display configuration and use it for the initial
+ * fb configuration.
+ *
+ * The BIOS or boot loader will generally create an initial display
+ * configuration for us that includes some set of active pipes and displays.
+ * This routine tries to figure out which pipes and connectors are active
+ * and stuffs them into the crtcs and modes array given to us by the
+ * drm_fb_helper code.
+ *
+ * The overall sequence is:
+ *   intel_fbdev_init - from driver load
+ *     intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
+ *     drm_fb_helper_init - build fb helper structs
+ *     drm_fb_helper_single_add_all_connectors - more fb helper structs
+ *   intel_fbdev_initial_config - apply the config
+ *     drm_fb_helper_initial_config - call ->probe then register_framebuffer()
+ *         drm_setup_crtcs - build crtc config for fbdev
+ *           intel_fb_initial_config - find active connectors etc
+ *         drm_fb_helper_single_fb_probe - set up fbdev
+ *           intelfb_create - re-use or alloc fb, build out fbdev structs
+ *
+ * Note that we don't check whether we could actually switch to the
+ * selected modes without a full modeset. E.g. when the display
+ * is in VGA mode we need to recalculate watermarks and set a new high-res
+ * framebuffer anyway.
+ */
+static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+                                   struct drm_fb_helper_crtc **crtcs,
+                                   struct drm_display_mode **modes,
+                                   bool *enabled, int width, int height)
+{
+       struct drm_device *dev = fb_helper->dev;
+       int i, j;
+       bool *save_enabled;
+       bool fallback = true;
+       int num_connectors_enabled = 0;
+       int num_connectors_detected = 0;
+
+       /*
+        * If the user specified any force options, just bail here
+        * and use that config.
+        */
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               struct drm_fb_helper_connector *fb_conn;
+               struct drm_connector *connector;
+
+               fb_conn = fb_helper->connector_info[i];
+               connector = fb_conn->connector;
+
+               if (!enabled[i])
+                       continue;
+
+               if (connector->force != DRM_FORCE_UNSPECIFIED)
+                       return false;
+       }
+
+       save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
+                              GFP_KERNEL);
+       if (!save_enabled)
+               return false;
+
+       memcpy(save_enabled, enabled, dev->mode_config.num_connector);
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               struct drm_fb_helper_connector *fb_conn;
+               struct drm_connector *connector;
+               struct drm_encoder *encoder;
+               struct drm_fb_helper_crtc *new_crtc;
+
+               fb_conn = fb_helper->connector_info[i];
+               connector = fb_conn->connector;
+
+               if (connector->status == connector_status_connected)
+                       num_connectors_detected++;
+
+               if (!enabled[i]) {
+                       DRM_DEBUG_KMS("connector %d not enabled, skipping\n",
+                                     connector->base.id);
+                       continue;
+               }
+
+               encoder = connector->encoder;
+               if (!encoder || WARN_ON(!encoder->crtc)) {
+                       DRM_DEBUG_KMS("connector %d has no encoder or crtc, skipping\n",
+                                     connector->base.id);
+                       enabled[i] = false;
+                       continue;
+               }
+
+               num_connectors_enabled++;
+
+               new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
+
+               /*
+                * Make sure we're not trying to drive multiple connectors
+                * with a single CRTC, since our cloning support may not
+                * match the BIOS.
+                */
+               for (j = 0; j < fb_helper->connector_count; j++) {
+                       if (crtcs[j] == new_crtc) {
+                               DRM_DEBUG_KMS("fallback: cloned configuration\n");
+                               fallback = true;
+                               goto out;
+                       }
+               }
+
+               DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+                             fb_conn->connector->base.id);
+
+               /* go for command line mode first */
+               modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
+
+               /* try for preferred next */
+               if (!modes[i]) {
+                       DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+                                     fb_conn->connector->base.id);
+                       modes[i] = drm_has_preferred_mode(fb_conn, width,
+                                                         height);
+               }
+
+               /* last resort: use current mode */
+               if (!modes[i]) {
+                       /*
+                        * IMPORTANT: We want to use the adjusted mode (i.e.
+                        * after the panel fitter upscaling) as the initial
+                        * config, not the input mode, which is what crtc->mode
+                        * usually contains. But since our current fastboot
+                        * code puts a mode derived from the post-pfit timings
+                        * into crtc->mode this works out correctly. We don't
+                        * use hwmode anywhere right now, so use it for this
+                        * since the fb helper layer wants a pointer to
+                        * something we own.
+                        */
+                       intel_mode_from_pipe_config(&encoder->crtc->hwmode,
+                                                   &to_intel_crtc(encoder->crtc)->config);
+                       modes[i] = &encoder->crtc->hwmode;
+               }
+               crtcs[i] = new_crtc;
+
+               DRM_DEBUG_KMS("connector %s on crtc %d: %s\n",
+                             drm_get_connector_name(connector),
+                             encoder->crtc->base.id,
+                             modes[i]->name);
+
+               fallback = false;
+       }
+
+       /*
+        * If the BIOS didn't enable everything it could, fall back to the
+        * fbdev helper library's default behaviour of lighting up as many
+        * outputs as possible.
+        */
+       if (num_connectors_enabled != num_connectors_detected &&
+           num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
+               DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
+               DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
+                             num_connectors_detected);
+               fallback = true;
+       }
+
+out:
+       if (fallback) {
+               DRM_DEBUG_KMS("Not using firmware configuration\n");
+               memcpy(enabled, save_enabled, dev->mode_config.num_connector);
+               kfree(save_enabled);
+               return false;
+       }
+
+       kfree(save_enabled);
+       return true;
+}
+
 static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+       .initial_config = intel_fb_initial_config,
        .gamma_set = intel_crtc_fb_gamma_set,
        .gamma_get = intel_crtc_fb_gamma_get,
        .fb_probe = intelfb_create,
@@ -258,8 +452,126 @@ static void intel_fbdev_destroy(struct drm_device *dev,
 
        drm_fb_helper_fini(&ifbdev->helper);
 
-       drm_framebuffer_unregister_private(&ifbdev->ifb.base);
-       intel_framebuffer_fini(&ifbdev->ifb);
+       drm_framebuffer_unregister_private(&ifbdev->fb->base);
+       drm_framebuffer_remove(&ifbdev->fb->base);
+}
+
+/*
+ * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
+ * The core display code will have read out the current plane configuration,
+ * so we use that to figure out if there's an object for us to use as the
+ * fb, and if so, we re-use it for the fbdev configuration.
+ *
+ * Note we only support a single fb shared across pipes for boot (mostly for
+ * fbcon), so we just find the biggest and use that.
+ */
+static bool intel_fbdev_init_bios(struct drm_device *dev,
+                                struct intel_fbdev *ifbdev)
+{
+       struct intel_framebuffer *fb = NULL;
+       struct drm_crtc *crtc;
+       struct intel_crtc *intel_crtc;
+       struct intel_plane_config *plane_config = NULL;
+       unsigned int max_size = 0;
+
+       if (!i915.fastboot)
+               return false;
+
+       /* Find the largest fb */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               intel_crtc = to_intel_crtc(crtc);
+
+               if (!intel_crtc->active || !crtc->fb) {
+                       DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
+                                     pipe_name(intel_crtc->pipe));
+                       continue;
+               }
+
+               if (intel_crtc->plane_config.size > max_size) {
+                       DRM_DEBUG_KMS("found possible fb from plane %c\n",
+                                     pipe_name(intel_crtc->pipe));
+                       plane_config = &intel_crtc->plane_config;
+                       fb = to_intel_framebuffer(crtc->fb);
+                       max_size = plane_config->size;
+               }
+       }
+
+       if (!fb) {
+               DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
+               goto out;
+       }
+
+       /* Now make sure all the pipes will fit into it */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               unsigned int cur_size;
+
+               intel_crtc = to_intel_crtc(crtc);
+
+               if (!intel_crtc->active) {
+                       DRM_DEBUG_KMS("pipe %c not active, skipping\n",
+                                     pipe_name(intel_crtc->pipe));
+                       continue;
+               }
+
+               DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
+                             pipe_name(intel_crtc->pipe));
+
+               /*
+                * See if the plane fb we found above will fit on this
+                * pipe.  Note we need to use the selected fb's bpp rather
+                * than the current pipe's, since they could be different.
+                */
+               cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay *
+                       intel_crtc->config.adjusted_mode.crtc_vdisplay;
+               DRM_DEBUG_KMS("pipe %c area: %d\n", pipe_name(intel_crtc->pipe),
+                             cur_size);
+               cur_size *= fb->base.bits_per_pixel / 8;
+               DRM_DEBUG_KMS("total size %d (bpp %d)\n", cur_size,
+                             fb->base.bits_per_pixel / 8);
+
+               if (cur_size > max_size) {
+                       DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
+                                     pipe_name(intel_crtc->pipe),
+                                     cur_size, max_size);
+                       plane_config = NULL;
+                       fb = NULL;
+                       break;
+               }
+
+               DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
+                             pipe_name(intel_crtc->pipe),
+                             max_size, cur_size);
+       }
+
+       if (!fb) {
+               DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
+               goto out;
+       }
+
+       ifbdev->preferred_bpp = fb->base.bits_per_pixel;
+       ifbdev->fb = fb;
+
+       drm_framebuffer_reference(&ifbdev->fb->base);
+
+       /* Final pass to check if any active pipes don't have fbs */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               intel_crtc = to_intel_crtc(crtc);
+
+               if (!intel_crtc->active)
+                       continue;
+
+               WARN(!crtc->fb,
+                    "re-used BIOS config but lost an fb on crtc %d\n",
+                    crtc->base.id);
+       }
+
+       DRM_DEBUG_KMS("using BIOS fb for initial console\n");
+       return true;
+
+out:
+
+       return false;
 }
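To make the fit check above concrete, with assumed numbers:

/*
 * Worked example (illustrative figures): a pipe running 1920x1080 on a
 * 32bpp BIOS framebuffer needs
 *
 *      cur_size = 1920 * 1080 * (32 / 8) = 8294400 bytes
 *
 * so a BIOS-allocated fb smaller than that makes cur_size exceed
 * max_size and the whole BIOS config is dropped.
 */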
 
 int intel_fbdev_init(struct drm_device *dev)
@@ -268,21 +580,25 @@ int intel_fbdev_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
-       if (!ifbdev)
+       if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
+               return -ENODEV;
+
+       ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+       if (ifbdev == NULL)
                return -ENOMEM;
 
-       dev_priv->fbdev = ifbdev;
        ifbdev->helper.funcs = &intel_fb_helper_funcs;
+       if (!intel_fbdev_init_bios(dev, ifbdev))
+               ifbdev->preferred_bpp = 32;
 
        ret = drm_fb_helper_init(dev, &ifbdev->helper,
-                                INTEL_INFO(dev)->num_pipes,
-                                4);
+                                INTEL_INFO(dev)->num_pipes, 4);
        if (ret) {
                kfree(ifbdev);
                return ret;
        }
 
+       dev_priv->fbdev = ifbdev;
        drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
 
        return 0;
@@ -291,9 +607,10 @@ int intel_fbdev_init(struct drm_device *dev)
 void intel_fbdev_initial_config(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_fbdev *ifbdev = dev_priv->fbdev;
 
        /* Due to peculiar init order wrt to hpd handling this is separate. */
-       drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
+       drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp);
 }
 
 void intel_fbdev_fini(struct drm_device *dev)
@@ -322,7 +639,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
         * been restored from swap. If the object is stolen however, it will be
         * full of whatever garbage was left in there.
         */
-       if (state == FBINFO_STATE_RUNNING && ifbdev->ifb.obj->stolen)
+       if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
                memset_io(info->screen_base, 0, info->screen_size);
 
        fb_set_suspend(info, state);
@@ -331,7 +648,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+       if (dev_priv->fbdev)
+               drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
 }
 
 void intel_fbdev_restore_mode(struct drm_device *dev)
@@ -339,7 +657,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
        int ret;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (INTEL_INFO(dev)->num_pipes == 0)
+       if (!dev_priv->fbdev)
                return;
 
        drm_modeset_lock_all(dev);
index ee3181ebcc9236078835ee18a48fe86c97525ff9..d13c65d607b2d15d966f81723b2986c1eb395506 100644 (file)
@@ -113,7 +113,8 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
 }
 
 static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
-                                 enum transcoder cpu_transcoder)
+                                 enum transcoder cpu_transcoder,
+                                 struct drm_i915_private *dev_priv)
 {
        switch (type) {
        case HDMI_INFOFRAME_TYPE_AVI:
@@ -296,7 +297,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
        u32 val = I915_READ(ctl_reg);
 
        data_reg = hsw_infoframe_data_reg(type,
-                                         intel_crtc->config.cpu_transcoder);
+                                         intel_crtc->config.cpu_transcoder,
+                                         dev_priv);
        if (data_reg == 0)
                return;
 
@@ -423,7 +425,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
        u32 reg = VIDEO_DIP_CTL;
        u32 val = I915_READ(reg);
-       u32 port;
+       u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
 
        assert_hdmi_port_disabled(intel_hdmi);
 
@@ -447,18 +449,6 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
                return;
        }
 
-       switch (intel_dig_port->port) {
-       case PORT_B:
-               port = VIDEO_DIP_PORT_B;
-               break;
-       case PORT_C:
-               port = VIDEO_DIP_PORT_C;
-               break;
-       default:
-               BUG();
-               return;
-       }
-
        if (port != (val & VIDEO_DIP_PORT_MASK)) {
                if (val & VIDEO_DIP_ENABLE) {
                        val &= ~VIDEO_DIP_ENABLE;
@@ -489,7 +479,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
        u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
-       u32 port;
+       u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
 
        assert_hdmi_port_disabled(intel_hdmi);
 
@@ -505,21 +495,6 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
                return;
        }
 
-       switch (intel_dig_port->port) {
-       case PORT_B:
-               port = VIDEO_DIP_PORT_B;
-               break;
-       case PORT_C:
-               port = VIDEO_DIP_PORT_C;
-               break;
-       case PORT_D:
-               port = VIDEO_DIP_PORT_D;
-               break;
-       default:
-               BUG();
-               return;
-       }
-
        if (port != (val & VIDEO_DIP_PORT_MASK)) {
                if (val & VIDEO_DIP_ENABLE) {
                        val &= ~VIDEO_DIP_ENABLE;
@@ -692,8 +667,13 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+       enum intel_display_power_domain power_domain;
        u32 tmp;
 
+       power_domain = intel_display_port_power_domain(encoder);
+       if (!intel_display_power_enabled(dev_priv, power_domain))
+               return false;
+
        tmp = I915_READ(intel_hdmi->hdmi_reg);
 
        if (!(tmp & SDVO_ENABLE))
@@ -934,11 +914,15 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edid *edid;
+       enum intel_display_power_domain power_domain;
        enum drm_connector_status status = connector_status_disconnected;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, drm_get_connector_name(connector));
 
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
        intel_hdmi->has_hdmi_sink = false;
        intel_hdmi->has_audio = false;
        intel_hdmi->rgb_quant_range_selectable = false;
@@ -966,31 +950,48 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
                intel_encoder->type = INTEL_OUTPUT_HDMI;
        }
 
+       intel_display_power_put(dev_priv, power_domain);
+
        return status;
 }
 
 static int intel_hdmi_get_modes(struct drm_connector *connector)
 {
-       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       enum intel_display_power_domain power_domain;
+       int ret;
 
        /* We should parse the EDID data and find out if it's an HDMI sink so
         * we can send audio to it.
         */
 
-       return intel_ddc_get_modes(connector,
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
+       ret = intel_ddc_get_modes(connector,
                                   intel_gmbus_get_adapter(dev_priv,
                                                           intel_hdmi->ddc_bus));
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 static bool
 intel_hdmi_detect_audio(struct drm_connector *connector)
 {
-       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       enum intel_display_power_domain power_domain;
        struct edid *edid;
        bool has_audio = false;
 
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
        edid = drm_get_edid(connector,
                            intel_gmbus_get_adapter(dev_priv,
                                                    intel_hdmi->ddc_bus));
@@ -1000,6 +1001,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
                kfree(edid);
        }
 
+       intel_display_power_put(dev_priv, power_domain);
+
        return has_audio;
 }
 
@@ -1261,6 +1264,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
+       intel_connector->unregister = intel_connector_unregister;
 
        intel_hdmi_add_properties(intel_hdmi, connector);
 
@@ -1314,7 +1318,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
 
        intel_encoder->type = INTEL_OUTPUT_HDMI;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-       intel_encoder->cloneable = false;
+       intel_encoder->cloneable = 0;
 
        intel_dig_port->port = port;
        intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
index 8bcb93a2a9f6b1d09780c3154f6c656895451e8e..ef5e5661efb21f3ade3da94e52ac47f1c86af5fc 100644 (file)
@@ -848,8 +848,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        /* use the module option value if specified */
-       if (i915_lvds_channel_mode > 0)
-               return i915_lvds_channel_mode == 2;
+       if (i915.lvds_channel_mode > 0)
+               return i915.lvds_channel_mode == 2;
 
        if (dmi_check_system(intel_dual_link_lvds))
                return true;
@@ -899,6 +899,7 @@ void intel_lvds_init(struct drm_device *dev)
        struct drm_encoder *encoder;
        struct drm_display_mode *scan; /* *modes, *bios_mode; */
        struct drm_display_mode *fixed_mode = NULL;
+       struct drm_display_mode *downclock_mode = NULL;
        struct edid *edid;
        struct drm_crtc *crtc;
        u32 lvds;
@@ -957,11 +958,12 @@ void intel_lvds_init(struct drm_device *dev)
        intel_encoder->get_hw_state = intel_lvds_get_hw_state;
        intel_encoder->get_config = intel_lvds_get_config;
        intel_connector->get_hw_state = intel_connector_get_hw_state;
+       intel_connector->unregister = intel_connector_unregister;
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        intel_encoder->type = INTEL_OUTPUT_LVDS;
 
-       intel_encoder->cloneable = false;
+       intel_encoder->cloneable = 0;
        if (HAS_PCH_SPLIT(dev))
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        else if (IS_GEN4(dev))
@@ -1032,15 +1034,14 @@ void intel_lvds_init(struct drm_device *dev)
 
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        if (fixed_mode) {
-                               intel_connector->panel.downclock_mode =
+                               downclock_mode =
                                        intel_find_panel_downclock(dev,
                                        fixed_mode, connector);
-                               if (intel_connector->panel.downclock_mode !=
-                                       NULL && i915_lvds_downclock) {
+                               if (downclock_mode != NULL &&
+                                       i915.lvds_downclock) {
                                        /* We found the downclock for LVDS. */
                                        dev_priv->lvds_downclock_avail = true;
                                        dev_priv->lvds_downclock =
-                                               intel_connector->panel.
                                                downclock_mode->clock;
                                        DRM_DEBUG_KMS("LVDS downclock is found"
                                        " in EDID. Normal clock %dKhz, "
@@ -1116,7 +1117,7 @@ out:
        }
        drm_sysfs_connector_add(connector);
 
-       intel_panel_init(&intel_connector->panel, fixed_mode);
+       intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_panel_setup_backlight(connector);
 
        return;
@@ -1125,8 +1126,6 @@ failed:
        DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
        drm_connector_cleanup(connector);
        drm_encoder_cleanup(encoder);
-       if (fixed_mode)
-               drm_mode_destroy(dev, fixed_mode);
        kfree(lvds_encoder);
        kfree(lvds_connector);
        return;
index a759ecdb7a6ebaddee622ffd5a1d7900fcf5aee3..312961a8472e304313d4714684a7cb7cf9f3a6a0 100644 (file)
@@ -293,7 +293,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
        struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
        drm_gem_object_unreference(&obj->base);
 
        overlay->old_vid_bo = NULL;
@@ -306,7 +306,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
        /* never have the overlay hw on without showing a frame */
        BUG_ON(!overlay->vid_bo);
 
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        overlay->vid_bo = NULL;
 
@@ -782,7 +782,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        return 0;
 
 out_unpin:
-       i915_gem_object_unpin(new_bo);
+       i915_gem_object_ggtt_unpin(new_bo);
        return ret;
 }
 
@@ -1076,7 +1076,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
        mutex_lock(&dev->struct_mutex);
 
        if (new_bo->tiling_mode) {
-               DRM_ERROR("buffer used for overlay image can not be tiled\n");
+               DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
                ret = -EINVAL;
                goto out_unlock;
        }
@@ -1349,7 +1349,7 @@ void intel_setup_overlay(struct drm_device *dev)
                }
                overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
        } else {
-               ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
+               ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
                if (ret) {
                        DRM_ERROR("failed to pin overlay register bo\n");
                        goto out_free_bo;
@@ -1386,7 +1386,7 @@ void intel_setup_overlay(struct drm_device *dev)
 
 out_unpin_bo:
        if (!OVERLAY_NEEDS_PHYSICAL(dev))
-               i915_gem_object_unpin(reg_bo);
+               i915_gem_object_ggtt_unpin(reg_bo);
 out_free_bo:
        drm_gem_object_unreference(&reg_bo->base);
 out_free:
index 079ea38f14d9b4b54092a1ebeca4cd77dac27cec..cb058408c70ea6f8aee53c332a32515edd37c9f7 100644 (file)
@@ -33,8 +33,6 @@
 #include <linux/moduleparam.h>
 #include "intel_drv.h"
 
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
-
 void
 intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
                       struct drm_display_mode *adjusted_mode)
@@ -325,13 +323,6 @@ out:
        pipe_config->gmch_pfit.lvds_border_bits = border;
 }
 
-static int i915_panel_invert_brightness;
-MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
-       "(-1 force normal, 0 machine defaults, 1 force inversion), please "
-       "report PCI device ID, subsystem vendor and subsystem device ID "
-       "to dri-devel@lists.freedesktop.org, if your machine needs it. "
-       "It will then be included in an upcoming module version.");
-module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
 static u32 intel_panel_compute_brightness(struct intel_connector *connector,
                                          u32 val)
 {
@@ -341,10 +332,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 
        WARN_ON(panel->backlight.max == 0);
 
-       if (i915_panel_invert_brightness < 0)
+       if (i915.invert_brightness < 0)
                return val;
 
-       if (i915_panel_invert_brightness > 0 ||
+       if (i915.invert_brightness > 0 ||
            dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
                return panel->backlight.max - val;
        }
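The inversion logic above is a three-way policy: a negative module parameter forces normal brightness, a positive one forces inversion, and zero defers to per-machine quirks. A standalone sketch of the same decision; the parameter encoding follows the hunk, but the quirk bit value is invented for illustration:

    #include <stdio.h>

    #define QUIRK_INVERT_BRIGHTNESS (1u << 0) /* stand-in quirk bit */

    static unsigned compute_brightness(int invert_param, unsigned quirks,
                                       unsigned max, unsigned val)
    {
        if (invert_param < 0)                   /* -1: force normal */
            return val;
        if (invert_param > 0 ||                 /*  1: force inversion */
            (quirks & QUIRK_INVERT_BRIGHTNESS)) /*  0: machine default */
            return max - val;
        return val;
    }

    int main(void)
    {
        /* With max=255 and val=200, inversion yields 55. */
        printf("%u\n", compute_brightness(0, QUIRK_INVERT_BRIGHTNESS, 255, 200));
        return 0;
    }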
@@ -810,13 +801,13 @@ intel_panel_detect(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        /* Assume that the BIOS does not lie through the OpRegion... */
-       if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
+       if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
                return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
                        connector_status_connected :
                        connector_status_disconnected;
        }
 
-       switch (i915_panel_ignore_lid) {
+       switch (i915.panel_ignore_lid) {
        case -2:
                return connector_status_connected;
        case -1:
@@ -1199,9 +1190,11 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
 }
 
 int intel_panel_init(struct intel_panel *panel,
-                    struct drm_display_mode *fixed_mode)
+                    struct drm_display_mode *fixed_mode,
+                    struct drm_display_mode *downclock_mode)
 {
        panel->fixed_mode = fixed_mode;
+       panel->downclock_mode = downclock_mode;
 
        return 0;
 }
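With the extra parameter, the panel structure now owns both modes; callers that have no validated downclock mode would presumably pass NULL, while the LVDS path above hands over whatever it parsed from the EDID. A hypothetical caller, sketched with stub types (not the real drm_display_mode):

    #include <stddef.h>

    struct drm_display_mode { int clock; };

    struct intel_panel {
        struct drm_display_mode *fixed_mode;
        struct drm_display_mode *downclock_mode;
    };

    /* Mirrors the new three-argument signature from the hunk above. */
    static int intel_panel_init(struct intel_panel *panel,
                                struct drm_display_mode *fixed_mode,
                                struct drm_display_mode *downclock_mode)
    {
        panel->fixed_mode = fixed_mode;
        panel->downclock_mode = downclock_mode;
        return 0;
    }

    int main(void)
    {
        struct intel_panel panel;
        struct drm_display_mode fixed = { 70000 };

        /* Connectors without a downclock mode pass NULL. */
        intel_panel_init(&panel, &fixed, NULL);
        return panel.downclock_mode != NULL; /* 0 here */
    }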
index e1fc35a726564cc9f3526231780672d09d151838..ad58ce3b7675f0f8675aec257743d09d2346d93c 100644 (file)
@@ -97,7 +97,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
-       int plane, i;
+       int i;
        u32 fbc_ctl;
 
        cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
@@ -109,7 +109,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
                cfb_pitch = (cfb_pitch / 32) - 1;
        else
                cfb_pitch = (cfb_pitch / 64) - 1;
-       plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
@@ -120,7 +119,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
 
                /* Set it up... */
                fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-               fbc_ctl2 |= plane;
+               fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
                I915_WRITE(FBC_CONTROL2, fbc_ctl2);
                I915_WRITE(FBC_FENCE_OFF, crtc->y);
        }
@@ -135,7 +134,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 
-       DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c",
+       DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
                      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
 }
 
@@ -154,17 +153,19 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        u32 dpfc_ctl;
 
-       dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+       dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+       if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+       else
+               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
-       I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 
        I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
 
        /* enable it... */
-       I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+       I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
 }
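The pattern repeated in the g4x, Ironlake and gen7 FBC hunks is the same: pick the compression limit from the framebuffer's bytes per pixel, allowing the 2:1 limit only for 16bpp formats. A self-contained sketch of that selection; the register bit encodings below are invented for illustration:

    #include <stdio.h>

    #define DPFC_CTL_LIMIT_1X (0u << 6) /* illustrative encodings */
    #define DPFC_CTL_LIMIT_2X (1u << 6)

    /* Stand-in for drm_format_plane_cpp(): bytes per pixel of plane 0. */
    static int format_cpp(int bytes_per_pixel) { return bytes_per_pixel; }

    static unsigned dpfc_limit(int bytes_per_pixel)
    {
        /* 16bpp framebuffers may use the more aggressive 2:1 limit. */
        if (format_cpp(bytes_per_pixel) == 2)
            return DPFC_CTL_LIMIT_2X;
        return DPFC_CTL_LIMIT_1X;
    }

    int main(void)
    {
        printf("16bpp -> %#x, 32bpp -> %#x\n", dpfc_limit(2), dpfc_limit(4));
        return 0;
    }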
@@ -224,18 +225,16 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        u32 dpfc_ctl;
 
-       dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
-       dpfc_ctl &= DPFC_RESERVED;
-       dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
-       /* Set persistent mode for front-buffer rendering, ala X. */
-       dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+       dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+       if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+       else
+               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN;
        if (IS_GEN5(dev))
                dpfc_ctl |= obj->fence_reg;
-       I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
        I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
@@ -282,20 +281,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       u32 dpfc_ctl;
 
-       I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
+       dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+       if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+       else
+               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+       dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
-       I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
-                  IVB_DPFC_CTL_FENCE_EN |
-                  intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
+       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
        if (IS_IVYBRIDGE(dev)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
-               I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
+               I915_WRITE(ILK_DISPLAY_CHICKEN1,
+                          I915_READ(ILK_DISPLAY_CHICKEN1) |
+                          ILK_FBCQ_DIS);
        } else {
-               /* WaFbcAsynchFlipDisableFbcQueue:hsw */
-               I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
-                          HSW_BYPASS_FBC_QUEUE);
+               /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+               I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
+                          I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+                          HSW_FBCQ_DIS);
        }
 
        I915_WRITE(SNB_DPFC_CTL_SA,
@@ -466,7 +472,7 @@ void intel_update_fbc(struct drm_device *dev)
                return;
        }
 
-       if (!i915_powersave) {
+       if (!i915.powersave) {
                if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
                        DRM_DEBUG_KMS("fbc disabled per module param\n");
                return;
@@ -505,13 +511,13 @@ void intel_update_fbc(struct drm_device *dev)
        obj = intel_fb->obj;
        adjusted_mode = &intel_crtc->config.adjusted_mode;
 
-       if (i915_enable_fbc < 0 &&
+       if (i915.enable_fbc < 0 &&
            INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
                if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
                        DRM_DEBUG_KMS("disabled per chip default\n");
                goto out_disable;
        }
-       if (!i915_enable_fbc) {
+       if (!i915.enable_fbc) {
                if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
                        DRM_DEBUG_KMS("fbc disabled per module param\n");
                goto out_disable;
@@ -537,7 +543,7 @@ void intel_update_fbc(struct drm_device *dev)
                        DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                goto out_disable;
        }
-       if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
+       if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
            intel_crtc->plane != PLANE_A) {
                if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
                        DRM_DEBUG_KMS("plane not A, disabling compression\n");
@@ -1128,7 +1134,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
                *plane_wm = display->max_wm;
 
        /* Use the large buffer method to calculate cursor watermark */
-       line_time_us = ((htotal * 1000) / clock);
+       line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
        entries = line_count * 64 * pixel_size;
        tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
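The max(..., 1) guard introduced here, and repeated in the g4x_compute_srwm, i965 and i9xx watermark hunks below, exists because the division is integer: with a large enough pixel clock, htotal * 1000 / clock truncates to 0, and the subsequent division by line_time_us would fault. A worked example:

    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        int htotal = 800;
        int clock = 900000;     /* kHz; large enough to truncate */
        int latency_ns = 500;

        int unguarded = htotal * 1000 / clock;            /* 0: would divide by zero */
        int line_time_us = MAX(htotal * 1000 / clock, 1); /* clamped to 1 */
        int line_count = (latency_ns / line_time_us + 1000) / 1000;

        printf("unguarded=%d guarded=%d line_count=%d\n",
               unguarded, line_time_us, line_count);
        return 0;
    }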
@@ -1204,7 +1210,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
        hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
        pixel_size = crtc->fb->bits_per_pixel / 8;
 
-       line_time_us = (htotal * 1000) / clock;
+       line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (latency_ns / line_time_us + 1000) / 1000;
        line_size = hdisplay * pixel_size;
 
@@ -1437,7 +1443,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
                unsigned long line_time_us;
                int entries;
 
-               line_time_us = ((htotal * 1000) / clock);
+               line_time_us = max(htotal * 1000 / clock, 1);
 
                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
@@ -1563,7 +1569,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
                unsigned long line_time_us;
                int entries;
 
-               line_time_us = (htotal * 1000) / clock;
+               line_time_us = max(htotal * 1000 / clock, 1);
 
                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
@@ -1886,7 +1892,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
 }
 
 /* Calculate the maximum FBC watermark */
-static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
+static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
 {
        /* max that registers can hold */
        if (INTEL_INFO(dev)->gen >= 8)
@@ -1895,7 +1901,7 @@ static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
                return 15;
 }
 
-static void ilk_compute_wm_maximums(struct drm_device *dev,
+static void ilk_compute_wm_maximums(const struct drm_device *dev,
                                    int level,
                                    const struct intel_wm_config *config,
                                    enum intel_ddb_partitioning ddb_partitioning,
@@ -1948,7 +1954,7 @@ static bool ilk_validate_wm_level(int level,
        return ret;
 }
 
-static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
                                 int level,
                                 const struct ilk_pipe_wm_parameters *p,
                                 struct intel_wm_level *result)
@@ -2140,7 +2146,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
                                  struct intel_pipe_wm *pipe_wm)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct drm_i915_private *dev_priv = dev->dev_private;
        int level, max_level = ilk_wm_max_level(dev);
        /* LP0 watermark maximums depend on this pipe alone */
        struct intel_wm_config config = {
@@ -2738,7 +2744,7 @@ intel_alloc_context_page(struct drm_device *dev)
                return NULL;
        }
 
-       ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
+       ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
        if (ret) {
                DRM_ERROR("failed to pin power context: %d\n", ret);
                goto err_unref;
@@ -2753,7 +2759,7 @@ intel_alloc_context_page(struct drm_device *dev)
        return ctx;
 
 err_unpin:
-       i915_gem_object_unpin(ctx);
+       i915_gem_object_ggtt_unpin(ctx);
 err_unref:
        drm_gem_object_unreference(&ctx->base);
        return NULL;
@@ -3000,6 +3006,9 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
        dev_priv->rps.last_adj = 0;
 }
 
+/* gen6_set_rps is called to update the frequency request, but should also be
+ * called when the range (min_delay and max_delay) is modified so that we can
+ * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3008,8 +3017,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
        WARN_ON(val > dev_priv->rps.max_delay);
        WARN_ON(val < dev_priv->rps.min_delay);
 
-       if (val == dev_priv->rps.cur_delay)
+       if (val == dev_priv->rps.cur_delay) {
+               /* min/max delay may still have been modified, so be sure to
+                * write the limits value */
+               I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+                          gen6_rps_limits(dev_priv, val));
+
                return;
+       }
 
        gen6_set_rps_thresholds(dev_priv, val);
 
@@ -3035,6 +3050,58 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
        trace_intel_gpu_freq_change(val * 50);
 }
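The early-return change above is subtle: even when the requested frequency equals the current one, the min/max range may have moved in the meantime, so GEN6_RP_INTERRUPT_LIMITS must still be rewritten before returning. A sketch of that control flow; rps_limits below is only a stand-in for the real limits encoding:

    #include <stdio.h>

    static unsigned cur_delay, min_delay = 3, max_delay = 20;
    static unsigned limits_reg; /* stand-in for GEN6_RP_INTERRUPT_LIMITS */

    /* Illustrative: pack the current range into the register value. */
    static unsigned rps_limits(unsigned val)
    {
        (void)val;
        return (max_delay << 24) | (min_delay << 16);
    }

    static void set_rps(unsigned val)
    {
        if (val == cur_delay) {
            /* Frequency unchanged, but the range may have moved:
             * still refresh the limits register before returning. */
            limits_reg = rps_limits(val);
            return;
        }
        limits_reg = rps_limits(val);
        cur_delay = val;
    }

    int main(void)
    {
        set_rps(10);
        max_delay = 15;   /* user lowers the cap, e.g. via sysfs... */
        set_rps(10);      /* ...same freq, but limits still updated */
        printf("limits=%#x\n", limits_reg);
        return 0;
    }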
 
+/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
+ *
+ * If Gfx is idle, then:
+ * 1. Mask turbo interrupts
+ * 2. Bring up the Gfx clock
+ * 3. Change the freq to Rpn and wait till the P-Unit updates the freq
+ * 4. Clear the Force GFX CLK ON bit so that Gfx can power down
+ * 5. Unmask turbo interrupts
+ */
+static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
+{
+       /*
+        * When we are idle, drop to the minimum voltage state.
+        */
+
+       if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
+               return;
+
+       /* Mask turbo interrupts so that they do not arrive in between */
+       I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+
+       /* Bring up the Gfx clock */
+       I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
+               I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
+                               VLV_GFX_CLK_FORCE_ON_BIT);
+
+       if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
+               I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
+               DRM_ERROR("GFX_CLK_ON request timed out\n");
+               return;
+       }
+
+       dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
+
+       vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
+                                       dev_priv->rps.min_delay);
+
+       if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
+                               & GENFREQSTATUS) == 0, 5))
+               DRM_ERROR("timed out waiting for Punit\n");
+
+       /* Release the Gfx clock */
+       I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
+               I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
+                               ~VLV_GFX_CLK_FORCE_ON_BIT);
+
+       /* Unmask Up interrupts */
+       dev_priv->rps.rp_up_masked = true;
+       gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
+                                               dev_priv->rps.min_delay);
+}
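vlv_set_rps_idle follows a force-clock-on handshake: set the force bit, poll a status bit with a timeout, do the P-unit work, then release the force bit. A user-space sketch of that polling pattern; the bit layout and the always-acking register model are placeholders, not Valleyview's actual behaviour:

    #include <stdio.h>
    #include <stdbool.h>

    #define FORCE_ON_BIT   (1u << 0) /* placeholder bit layout */
    #define CLK_STATUS_BIT (1u << 1)

    static unsigned reg; /* stand-in for VLV_GTLC_SURVIVABILITY_REG */

    static unsigned read_reg(void) { return reg | CLK_STATUS_BIT; } /* pretend hw acks */
    static void write_reg(unsigned v) { reg = v; }

    static bool wait_for_status(unsigned bit, int tries)
    {
        while (tries--)
            if (read_reg() & bit)
                return true;
        return false;
    }

    int main(void)
    {
        write_reg(read_reg() | FORCE_ON_BIT);      /* 1. force clock on */
        if (!wait_for_status(CLK_STATUS_BIT, 5)) { /* 2. poll with timeout */
            fprintf(stderr, "clock-on request timed out\n");
            return 1;
        }
        /* 3. ... program the minimum frequency via the P-unit ... */
        write_reg(read_reg() & ~FORCE_ON_BIT);     /* 4. release the clock */
        return 0;
    }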
+
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
@@ -3042,7 +3109,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
                if (IS_VALLEYVIEW(dev))
-                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+                       vlv_set_rps_idle(dev_priv);
                else
                        gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
                dev_priv->rps.last_adj = 0;
@@ -3132,16 +3199,10 @@ static void valleyview_disable_rps(struct drm_device *dev)
 
 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
 {
-       if (IS_GEN6(dev))
-               DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
-
-       if (IS_HASWELL(dev))
-               DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
-
        DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-                       (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-                       (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-                       (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+                (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+                (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+                (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
 }
 
 int intel_enable_rc6(const struct drm_device *dev)
@@ -3151,21 +3212,17 @@ int intel_enable_rc6(const struct drm_device *dev)
                return 0;
 
        /* Respect the kernel parameter if it is set */
-       if (i915_enable_rc6 >= 0)
-               return i915_enable_rc6;
+       if (i915.enable_rc6 >= 0)
+               return i915.enable_rc6;
 
        /* Disable RC6 on Ironlake */
        if (INTEL_INFO(dev)->gen == 5)
                return 0;
 
-       if (IS_HASWELL(dev))
-               return INTEL_RC6_ENABLE;
-
-       /* snb/ivb have more than one rc6 state. */
-       if (INTEL_INFO(dev)->gen == 6)
-               return INTEL_RC6_ENABLE;
+       if (IS_IVYBRIDGE(dev))
+               return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 
-       return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+       return INTEL_RC6_ENABLE;
 }
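The rewritten policy reads like a table: an explicit module parameter always wins; Ironlake gets nothing; Ivybridge is now the only platform defaulting to RC6 plus deep RC6; everything else gets plain RC6. A compact sketch of that decision, with the gen/platform inputs reduced to plain integers:

    #include <stdio.h>

    #define RC6_ENABLE  (1 << 0)
    #define RC6p_ENABLE (1 << 1)

    static int enable_rc6(int param, int gen, int is_ivybridge)
    {
        if (param >= 0)           /* kernel parameter overrides everything */
            return param;
        if (gen == 5)             /* Ironlake: RC6 disabled */
            return 0;
        if (is_ivybridge)         /* only IVB defaults to deep RC6 too */
            return RC6_ENABLE | RC6p_ENABLE;
        return RC6_ENABLE;
    }

    int main(void)
    {
        printf("snb=%d ivb=%d ilk=%d forced=%d\n",
               enable_rc6(-1, 6, 0), enable_rc6(-1, 7, 1),
               enable_rc6(-1, 5, 0), enable_rc6(1, 7, 1));
        return 0;
    }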
 
 static void gen6_enable_rps_interrupts(struct drm_device *dev)
@@ -3222,10 +3279,10 @@ static void gen8_enable_rps(struct drm_device *dev)
        /* 3: Enable RC6 */
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
                rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
-       DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+       intel_print_rc6_info(dev, rc6_mask);
        I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-                       GEN6_RC_CTL_EI_MODE(1) |
-                       rc6_mask);
+                                   GEN6_RC_CTL_EI_MODE(1) |
+                                   rc6_mask);
 
        /* 4 Program defaults and thresholds for RPS*/
        I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
@@ -3267,7 +3324,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
-       u32 rp_state_cap;
+       u32 rp_state_cap, hw_max, hw_min;
        u32 gt_perf_status;
        u32 rc6vids, pcu_mbox, rc6_mask = 0;
        u32 gtfifodbg;
@@ -3296,13 +3353,20 @@ static void gen6_enable_rps(struct drm_device *dev)
        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
        /* In units of 50MHz */
-       dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
-       dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
+       dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
+       hw_min = (rp_state_cap >> 16) & 0xff;
        dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
        dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
        dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
        dev_priv->rps.cur_delay = 0;
 
+       /* Preserve min/max settings in case of re-init */
+       if (dev_priv->rps.max_delay == 0)
+               dev_priv->rps.max_delay = hw_max;
+
+       if (dev_priv->rps.min_delay == 0)
+               dev_priv->rps.min_delay = hw_min;
+
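+/*
+ * The "preserve min/max" hunks here and in valleyview_enable_rps below
+ * implement the same idea: hardware limits are re-read on every enable,
+ * but a user-configured (nonzero) min/max survives re-initialization.
+ * A standalone sketch of the pattern, with stub types:
+ *
+ *     struct rps { unsigned min_delay, max_delay, hw_max; };
+ *
+ *     static void enable_rps(struct rps *rps,
+ *                            unsigned hw_min, unsigned hw_max)
+ *     {
+ *             rps->hw_max = hw_max;
+ *
+ *             // Only seed from hardware when nothing was configured
+ *             // yet; a nonzero value set via sysfs survives re-init.
+ *             if (rps->max_delay == 0)
+ *                     rps->max_delay = hw_max;
+ *             if (rps->min_delay == 0)
+ *                     rps->min_delay = hw_min;
+ *     }
+ *
+ * First init takes the hardware limits; a later re-init (e.g. across
+ * suspend/resume) leaves a user-lowered max_delay untouched.
+ */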
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
 
@@ -3533,7 +3597,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
-       u32 gtfifodbg, val, rc6_mode = 0;
+       u32 gtfifodbg, val, hw_max, hw_min, rc6_mode = 0;
        int i;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -3593,21 +3657,27 @@ static void valleyview_enable_rps(struct drm_device *dev)
                         vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
                         dev_priv->rps.cur_delay);
 
-       dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
-       dev_priv->rps.hw_max = dev_priv->rps.max_delay;
+       dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
-                        dev_priv->rps.max_delay);
+                        vlv_gpu_freq(dev_priv, hw_max),
+                        hw_max);
 
        dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
                         vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
                         dev_priv->rps.rpe_delay);
 
-       dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
+       hw_min = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
-                        dev_priv->rps.min_delay);
+                        vlv_gpu_freq(dev_priv, hw_min),
+                        hw_min);
+
+       /* Preserve min/max settings in case of re-init */
+       if (dev_priv->rps.max_delay == 0)
+               dev_priv->rps.max_delay = hw_max;
+
+       if (dev_priv->rps.min_delay == 0)
+               dev_priv->rps.min_delay = hw_min;
 
        DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
                         vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
@@ -3615,6 +3685,9 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
        valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
+       dev_priv->rps.rp_up_masked = false;
+       dev_priv->rps.rp_down_masked = false;
+
        gen6_enable_rps_interrupts(dev);
 
        gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
@@ -3625,13 +3698,13 @@ void ironlake_teardown_rc6(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (dev_priv->ips.renderctx) {
-               i915_gem_object_unpin(dev_priv->ips.renderctx);
+               i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
                drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
                dev_priv->ips.renderctx = NULL;
        }
 
        if (dev_priv->ips.pwrctx) {
-               i915_gem_object_unpin(dev_priv->ips.pwrctx);
+               i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
                drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
                dev_priv->ips.pwrctx = NULL;
        }
@@ -3823,9 +3896,10 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
 
 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
        unsigned long val;
 
-       if (dev_priv->info->gen != 5)
+       if (INTEL_INFO(dev)->gen != 5)
                return 0;
 
        spin_lock_irq(&mchdev_lock);
@@ -3854,6 +3928,7 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
 
 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 {
+       struct drm_device *dev = dev_priv->dev;
        static const struct v_table {
                u16 vd; /* in .1 mil */
                u16 vm; /* in .1 mil */
@@ -3987,7 +4062,7 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
                { 16000, 14875, },
                { 16125, 15000, },
        };
-       if (dev_priv->info->is_mobile)
+       if (INTEL_INFO(dev)->is_mobile)
                return v_table[pxvid].vm;
        else
                return v_table[pxvid].vd;
@@ -4030,7 +4105,9 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
-       if (dev_priv->info->gen != 5)
+       struct drm_device *dev = dev_priv->dev;
+
+       if (INTEL_INFO(dev)->gen != 5)
                return;
 
        spin_lock_irq(&mchdev_lock);
@@ -4079,9 +4156,10 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 
 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
        unsigned long val;
 
-       if (dev_priv->info->gen != 5)
+       if (INTEL_INFO(dev)->gen != 5)
                return 0;
 
        spin_lock_irq(&mchdev_lock);
@@ -4270,6 +4348,7 @@ void intel_gpu_ips_teardown(void)
        i915_mch_dev = NULL;
        spin_unlock_irq(&mchdev_lock);
 }
+
 static void intel_init_emon(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4587,6 +4666,17 @@ static void gen6_init_clock_gating(struct drm_device *dev)
                I915_WRITE(GEN6_GT_MODE,
                           _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       I915_WRITE(GEN6_GT_MODE,
+                  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
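GEN6_GT_MODE and the other GT_MODE/CHICKEN registers written in these hunks use the masked-bit convention: the upper 16 bits select which of the lower 16 bits the write actually affects, which is what GEN6_WIZ_HASHING_MASK and the _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE macros encode. A sketch of that convention; the bit positions are invented for illustration:

    #include <stdio.h>

    /* A write only changes low bits whose mask twin (bit + 16) is set. */
    #define MASKED_BIT_ENABLE(b)  (((b) << 16) | (b))
    #define MASKED_BIT_DISABLE(b) ((b) << 16)

    static unsigned masked_write(unsigned reg, unsigned val)
    {
        unsigned mask = val >> 16;
        return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        unsigned reg = 0;

        reg = masked_write(reg, MASKED_BIT_ENABLE(1u << 2));  /* sets bit 2 */
        reg = masked_write(reg, MASKED_BIT_DISABLE(1u << 2)); /* clears it */
        reg = masked_write(reg, 1u << 3); /* no effect: mask half is 0 */
        printf("reg=%#x\n", reg);
        return 0;
    }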
        ilk_init_lp_watermarks(dev);
 
        I915_WRITE(CACHE_MODE_0,
@@ -4607,17 +4697,24 @@ static void gen6_init_clock_gating(struct drm_device *dev)
         * According to the spec, bit 11 (RCCUNIT) must also be set,
         * but we didn't debug actual testcases to find it out.
         *
-        * Also apply WaDisableVDSUnitClockGating:snb and
-        * WaDisableRCPBUnitClockGating:snb.
+        * WaDisableRCCUnitClockGating:snb
+        * WaDisableRCPBUnitClockGating:snb
         */
        I915_WRITE(GEN6_UCGCTL2,
-                  GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
                   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
                   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
-       /* Bspec says we need to always set all mask bits. */
-       I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
-                  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
+       /* WaStripsFansDisableFastClipPerformanceFix:snb */
+       I915_WRITE(_3D_CHICKEN3,
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
+
+       /*
+        * Bspec says:
+        * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
+        * 3DSTATE_SF number of SF output attributes is more than 16."
+        */
+       I915_WRITE(_3D_CHICKEN3,
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
 
        /*
         * According to the spec the following bits should be
@@ -4643,11 +4740,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 
        g4x_disable_trickle_feed(dev);
 
-       /* The default value should be 0x200 according to docs, but the two
-        * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
-       I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
-       I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
-
        cpt_init_clock_gating(dev);
 
        gen6_check_mch_setup(dev);
@@ -4657,14 +4749,17 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 {
        uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
 
+       /*
+        * WaVSThreadDispatchOverride:ivb,vlv
+        *
+        * This actually overrides the dispatch
+        * mode for all thread types.
+        */
        reg &= ~GEN7_FF_SCHED_MASK;
        reg |= GEN7_FF_TS_SCHED_HW;
        reg |= GEN7_FF_VS_SCHED_HW;
        reg |= GEN7_FF_DS_SCHED_HW;
 
-       if (IS_HASWELL(dev_priv->dev))
-               reg &= ~GEN7_FF_VS_REF_CNT_FFME;
-
        I915_WRITE(GEN7_FF_THREAD_MODE, reg);
 }
 
@@ -4702,7 +4797,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
 static void gen8_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       enum pipe i;
+       enum pipe pipe;
 
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
@@ -4711,8 +4806,19 @@ static void gen8_init_clock_gating(struct drm_device *dev)
        /* FIXME(BDW): Check all the w/a, some might only apply to
         * pre-production hw. */
 
-       WARN(!i915_preliminary_hw_support,
-            "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
+       /* WaDisablePartialInstShootdown:bdw */
+       I915_WRITE(GEN8_ROW_CHICKEN,
+                  _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+
+       /* WaDisableThreadStallDopClockGating:bdw */
+       /* FIXME: Unclear whether we really need this on production bdw. */
+       I915_WRITE(GEN8_ROW_CHICKEN,
+                  _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+
+       /*
+        * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
+        * pre-production hardware
+        * pre-production hardware.
        I915_WRITE(HALF_SLICE_CHICKEN3,
                   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
        I915_WRITE(HALF_SLICE_CHICKEN3,
@@ -4736,10 +4842,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
                   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
 
        /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
-       for_each_pipe(i) {
-               I915_WRITE(CHICKEN_PIPESL_1(i),
-                          I915_READ(CHICKEN_PIPESL_1(i) |
-                                    DPRS_MASK_VBLANK_SRD));
+       for_each_pipe(pipe) {
+               I915_WRITE(CHICKEN_PIPESL_1(pipe),
+                          I915_READ(CHICKEN_PIPESL_1(pipe)) |
+                          BDW_DPRS_MASK_VBLANK_SRD);
        }
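Worth noting in the hunk above: the old code had the OR inside the I915_READ() parentheses, so it read from the wrong register offset instead of setting the bit in the value; the fix reads the register first and then ORs in the new bit. The corrected read-modify-write shape, reduced to an abstract register file:

    #include <stdio.h>

    static unsigned regs[16];

    static unsigned read_reg(unsigned off) { return regs[off % 16]; }
    static void write_reg(unsigned off, unsigned v) { regs[off % 16] = v; }

    #define REG_OFF 4
    #define SRD_BIT (1u << 0)

    int main(void)
    {
        regs[4] = 0x10;

        /* Buggy shape: OR applied to the *offset*, reading a wrong register:
         *   write_reg(REG_OFF, read_reg(REG_OFF | SRD_BIT));
         * Fixed shape: read, then OR the bit into the *value*: */
        write_reg(REG_OFF, read_reg(REG_OFF) | SRD_BIT);

        printf("reg=%#x\n", regs[4]); /* 0x11: old contents preserved */
        return 0;
    }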
 
        /* Use Force Non-Coherent whenever executing a 3D context. This is a
@@ -4755,6 +4861,24 @@ static void gen8_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GEN7_FF_THREAD_MODE,
                   I915_READ(GEN7_FF_THREAD_MODE) &
                   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       I915_WRITE(GEN7_GT_MODE,
+                  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
+       I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+                  _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+       /* WaDisableSDEUnitClockGating:bdw */
+       I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+                  GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 }
 
 static void haswell_init_clock_gating(struct drm_device *dev)
@@ -4763,21 +4887,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 
        ilk_init_lp_watermarks(dev);
 
-       /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
-        * This implements the WaDisableRCZUnitClockGating:hsw workaround.
-        */
-       I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
-       /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
-       I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
-                  GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
-       /* WaApplyL3ControlAndL3ChickenMode:hsw */
-       I915_WRITE(GEN7_L3CNTLREG1,
-                       GEN7_WA_FOR_GEN7_L3_CONTROL);
-       I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
-                       GEN7_WA_L3_CHICKEN_MODE);
-
        /* L3 caching of data atomics doesn't work -- disable it. */
        I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
        I915_WRITE(HSW_ROW_CHICKEN3,
@@ -4789,12 +4898,28 @@ static void haswell_init_clock_gating(struct drm_device *dev)
                        GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
        /* WaVSRefCountFullforceMissDisable:hsw */
-       gen7_setup_fixed_func_scheduler(dev_priv);
+       I915_WRITE(GEN7_FF_THREAD_MODE,
+                  I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
+
+       /* enable HiZ Raw Stall Optimization */
+       I915_WRITE(CACHE_MODE_0_GEN7,
+                  _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
 
        /* WaDisable4x2SubspanOptimization:hsw */
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       I915_WRITE(GEN7_GT_MODE,
+                  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
        /* WaSwitchSolVfFArbitrationPriority:hsw */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
@@ -4827,9 +4952,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
        if (IS_IVB_GT1(dev))
                I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
                           _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
-       else
-               I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
-                          _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
        /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
        I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
@@ -4843,31 +4965,24 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
        if (IS_IVB_GT1(dev))
                I915_WRITE(GEN7_ROW_CHICKEN2,
                           _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-       else
+       else {
+               /* must write both registers */
+               I915_WRITE(GEN7_ROW_CHICKEN2,
+                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
                I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
                           _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-
+       }
 
        /* WaForceL3Serialization:ivb */
        I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
                   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
-       /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
-        * gating disable must be set.  Failure to set it results in
-        * flickering pixels due to Z write ordering failures after
-        * some amount of runtime in the Mesa "fire" demo, and Unigine
-        * Sanctuary and Tropics, and apparently anything else with
-        * alpha test or pixel discard.
-        *
-        * According to the spec, bit 11 (RCCUNIT) must also be set,
-        * but we didn't debug actual testcases to find it out.
-        *
+       /*
         * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
         * This implements the WaDisableRCZUnitClockGating:ivb workaround.
         */
        I915_WRITE(GEN6_UCGCTL2,
-                  GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
-                  GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+                  GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
        /* This is required by WaCatErrorRejectionIssue:ivb */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
@@ -4876,13 +4991,29 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 
        g4x_disable_trickle_feed(dev);
 
-       /* WaVSRefCountFullforceMissDisable:ivb */
        gen7_setup_fixed_func_scheduler(dev_priv);
 
+       if (0) { /* causes HiZ corruption on ivb:gt1 */
+               /* enable HiZ Raw Stall Optimization */
+               I915_WRITE(CACHE_MODE_0_GEN7,
+                          _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
+       }
+
        /* WaDisable4x2SubspanOptimization:ivb */
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       I915_WRITE(GEN7_GT_MODE,
+                  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        snpcr &= ~GEN6_MBC_SNPCR_MASK;
        snpcr |= GEN6_MBC_SNPCR_MED;
@@ -4929,19 +5060,12 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
+       /* WaPsdDispatchEnable:vlv */
        /* WaDisablePSDDualDispatchEnable:vlv */
        I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
                   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
                                      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
-       /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
-       I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
-                  GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
-       /* WaApplyL3ControlAndL3ChickenMode:vlv */
-       I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
-       I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
-
        /* WaForceL3Serialization:vlv */
        I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
                   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
@@ -4955,51 +5079,39 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
                   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
                   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-       /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
-        * gating disable must be set.  Failure to set it results in
-        * flickering pixels due to Z write ordering failures after
-        * some amount of runtime in the Mesa "fire" demo, and Unigine
-        * Sanctuary and Tropics, and apparently anything else with
-        * alpha test or pixel discard.
-        *
-        * According to the spec, bit 11 (RCCUNIT) must also be set,
-        * but we didn't debug actual testcases to find it out.
-        *
+       gen7_setup_fixed_func_scheduler(dev_priv);
+
+       /*
         * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
         * This implements the WaDisableRCZUnitClockGating:vlv workaround.
-        *
-        * Also apply WaDisableVDSUnitClockGating:vlv and
-        * WaDisableRCPBUnitClockGating:vlv.
         */
        I915_WRITE(GEN6_UCGCTL2,
-                  GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
-                  GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
-                  GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
-                  GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
-                  GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+                  GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
+       /* WaDisableL3Bank2xClockGate:vlv */
        I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
 
        I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 
+       /*
+        * BSpec says this must be set, even though
+        * WaDisable4x2SubspanOptimization isn't listed for VLV.
+        */
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
+       /*
+        * WaIncreaseL3CreditsForVLVB0:vlv
+        * This is the hardware default actually.
+        */
+       I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
+
        /*
         * WaDisableVLVClockGating_VBIIssue:vlv
         * Disable clock gating on the GCFG unit to prevent a delay
         * in the reporting of vblank events.
         */
-       I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
-
-       /* Conservative clock gating settings for now */
-       I915_WRITE(0x9400, 0xffffffff);
-       I915_WRITE(0x9404, 0xffffffff);
-       I915_WRITE(0x9408, 0xffffffff);
-       I915_WRITE(0x940c, 0xffffffff);
-       I915_WRITE(0x9410, 0xffffffff);
-       I915_WRITE(0x9414, 0xffffffff);
-       I915_WRITE(0x9418, 0xffffffff);
+       I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
 }
 
 static void g4x_init_clock_gating(struct drm_device *dev)
@@ -5114,19 +5226,16 @@ void intel_suspend_hw(struct drm_device *dev)
  * enable it, so check if it's enabled and also check if we've requested it to
  * be enabled.
  */
-static bool hsw_power_well_enabled(struct drm_device *dev,
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        return I915_READ(HSW_PWR_WELL_DRIVER) ==
                     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
 }
 
-bool intel_display_power_enabled_sw(struct drm_device *dev,
+bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
                                    enum intel_display_power_domain domain)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
 
        power_domains = &dev_priv->power_domains;
@@ -5134,10 +5243,9 @@ bool intel_display_power_enabled_sw(struct drm_device *dev,
        return power_domains->domain_use_count[domain];
 }
 
-bool intel_display_power_enabled(struct drm_device *dev,
+bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
        bool is_enabled;
@@ -5152,7 +5260,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
                if (power_well->always_on)
                        continue;
 
-               if (!power_well->is_enabled(dev, power_well)) {
+               if (!power_well->ops->is_enabled(dev_priv, power_well)) {
                        is_enabled = false;
                        break;
                }
@@ -5162,6 +5270,12 @@ bool intel_display_power_enabled(struct drm_device *dev,
        return is_enabled;
 }
 
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
@@ -5198,10 +5312,17 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
        }
 }
 
+static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
+{
+       assert_spin_locked(&dev->vbl_lock);
+
+       dev->vblank[pipe].last = 0;
+}
+
 static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
-       enum pipe p;
+       enum pipe pipe;
        unsigned long irqflags;
 
        /*
@@ -5212,16 +5333,15 @@ static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
         * FIXME: Should we do this in general in drm_vblank_post_modeset?
         */
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
-       for_each_pipe(p)
-               if (p != PIPE_A)
-                       dev->vblank[p].last = 0;
+       for_each_pipe(pipe)
+               if (pipe != PIPE_A)
+                       reset_vblank_counter(dev, pipe);
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 
-static void hsw_set_power_well(struct drm_device *dev,
+static void hsw_set_power_well(struct drm_i915_private *dev_priv,
                               struct i915_power_well *power_well, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        bool is_enabled, enable_requested;
        uint32_t tmp;
 
@@ -5255,35 +5375,204 @@ static void hsw_set_power_well(struct drm_device *dev,
        }
 }
 
-static void __intel_power_well_get(struct drm_device *dev,
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
 
-       if (!power_well->count++ && power_well->set) {
-               hsw_disable_package_c8(dev_priv);
-               power_well->set(dev, power_well, true);
-       }
+       /*
+        * We're taking over the BIOS, so clear any requests made by it since
+        * the driver is in charge now.
+        */
+       if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+               I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+                                 struct i915_power_well *power_well)
+{
+       hsw_disable_package_c8(dev_priv);
+       hsw_set_power_well(dev_priv, power_well, true);
 }
 
-static void __intel_power_well_put(struct drm_device *dev,
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       hsw_set_power_well(dev_priv, power_well, false);
+       hsw_enable_package_c8(dev_priv);
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+                                            struct i915_power_well *power_well)
+{
+       return true;
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+                              struct i915_power_well *power_well, bool enable)
+{
+       enum punit_power_well power_well_id = power_well->data;
+       u32 mask;
+       u32 state;
+       u32 ctrl;
+
+       mask = PUNIT_PWRGT_MASK(power_well_id);
+       state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
+                        PUNIT_PWRGT_PWR_GATE(power_well_id);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+       ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+       if (COND)
+               goto out;
+
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+       ctrl &= ~mask;
+       ctrl |= state;
+       vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+       if (wait_for(COND, 100))
+               DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+                         state,
+                         vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+                                 struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       int power_well_id = power_well->data;
+       bool enabled = false;
+       u32 mask;
+       u32 state;
+       u32 ctrl;
+
+       mask = PUNIT_PWRGT_MASK(power_well_id);
+       ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+       /*
+        * We only ever set the power-on and power-gate states, anything
+        * else is unexpected.
+        */
+       WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
+               state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+       if (state == ctrl)
+               enabled = true;
+
+       /*
+        * A transient state at this point would mean some unexpected party
+        * is poking at the power controls too.
+        */
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+       WARN_ON(ctrl != state);
+
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       return enabled;
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+                                         struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       valleyview_enable_display_irqs(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       /*
+        * During driver initialization we need to defer enabling hotplug
+        * processing until fbdev is set up.
+        */
+       if (dev_priv->enable_hotplug_processing)
+               intel_hpd_init(dev_priv->dev);
+
+       i915_redisable_vga_power_on(dev_priv->dev);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       struct drm_device *dev = dev_priv->dev;
+       enum pipe pipe;
+
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       for_each_pipe(pipe)
+               __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 
-       WARN_ON(!power_well->count);
+       valleyview_disable_display_irqs(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       spin_lock_irq(&dev->vbl_lock);
+       for_each_pipe(pipe)
+               reset_vblank_counter(dev, pipe);
+       spin_unlock_irq(&dev->vbl_lock);
+
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void check_power_well_state(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
+
+       if (power_well->always_on || !i915.disable_power_well) {
+               if (!enabled)
+                       goto mismatch;
 
-       if (!--power_well->count && power_well->set &&
-           i915_disable_power_well) {
-               power_well->set(dev, power_well, false);
-               hsw_enable_package_c8(dev_priv);
+               return;
        }
+
+       if (enabled != (power_well->count > 0))
+               goto mismatch;
+
+       return;
+
+mismatch:
+       WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
+                 power_well->name, power_well->always_on, enabled,
+                 power_well->count, i915.disable_power_well);
 }
 
-void intel_display_power_get(struct drm_device *dev,
+void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
        int i;
@@ -5292,18 +5581,23 @@ void intel_display_power_get(struct drm_device *dev,
 
        mutex_lock(&power_domains->lock);
 
-       for_each_power_well(i, power_well, BIT(domain), power_domains)
-               __intel_power_well_get(dev, power_well);
+       for_each_power_well(i, power_well, BIT(domain), power_domains) {
+               if (!power_well->count++) {
+                       DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+                       power_well->ops->enable(dev_priv, power_well);
+               }
+
+               check_power_well_state(dev_priv, power_well);
+       }
 
        power_domains->domain_use_count[domain]++;
 
        mutex_unlock(&power_domains->lock);
 }
 
-void intel_display_power_put(struct drm_device *dev,
+void intel_display_power_put(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
        int i;
@@ -5315,8 +5609,16 @@ void intel_display_power_put(struct drm_device *dev,
        WARN_ON(!power_domains->domain_use_count[domain]);
        power_domains->domain_use_count[domain]--;
 
-       for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
-               __intel_power_well_put(dev, power_well);
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+               WARN_ON(!power_well->count);
+
+               if (!--power_well->count && i915.disable_power_well) {
+                       DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+                       power_well->ops->disable(dev_priv, power_well);
+               }
+
+               check_power_well_state(dev_priv, power_well);
+       }
 
        mutex_unlock(&power_domains->lock);
 }
@@ -5333,7 +5635,7 @@ void i915_request_power_well(void)
 
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
-       intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
+       intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 
@@ -5347,29 +5649,99 @@ void i915_release_power_well(void)
 
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
-       intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS (                  \
+       BIT(POWER_DOMAIN_PIPE_A) |                      \
+       BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
+       BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_CRT) |                    \
+       BIT(POWER_DOMAIN_INIT))
+#define HSW_DISPLAY_POWER_DOMAINS (                            \
+       (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
+       BIT(POWER_DOMAIN_INIT))
+
+#define BDW_ALWAYS_ON_POWER_DOMAINS (                  \
+       HSW_ALWAYS_ON_POWER_DOMAINS |                   \
+       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
+#define BDW_DISPLAY_POWER_DOMAINS (                            \
+       (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |    \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_ALWAYS_ON_POWER_DOMAINS    BIT(POWER_DOMAIN_INIT)
+#define VLV_DISPLAY_POWER_DOMAINS      POWER_DOMAIN_MASK
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS (                \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_CRT) |            \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+       .sync_hw = i9xx_always_on_power_well_noop,
+       .enable = i9xx_always_on_power_well_noop,
+       .disable = i9xx_always_on_power_well_noop,
+       .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
 static struct i915_power_well i9xx_always_on_power_well[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
        },
 };
 
+static const struct i915_power_well_ops hsw_power_well_ops = {
+       .sync_hw = hsw_power_well_sync_hw,
+       .enable = hsw_power_well_enable,
+       .disable = hsw_power_well_disable,
+       .is_enabled = hsw_power_well_enabled,
+};
+
 static struct i915_power_well hsw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
-               .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
-               .is_enabled = hsw_power_well_enabled,
-               .set = hsw_set_power_well,
+               .domains = HSW_DISPLAY_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
        },
 };
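
The well tables now bind behaviour through a shared ops vtable instead of per-well function pointers. The i9xx noop callbacks referenced by i9xx_always_on_power_well_ops are not shown in this hunk; a plausible shape, inferred from how the ops are invoked elsewhere in the diff (an assumption, not the patch's actual bodies):

/* Assumed bodies for the always-on callbacks (not shown in this diff). */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        /* nothing to do: an always-on well has no state to toggle */
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
                                              struct i915_power_well *power_well)
{
        return true;    /* an always-on well always reports enabled */
}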
 
@@ -5378,12 +5750,83 @@ static struct i915_power_well bdw_power_wells[] = {
                .name = "always-on",
                .always_on = 1,
                .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
-               .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
-               .is_enabled = hsw_power_well_enabled,
-               .set = hsw_set_power_well,
+               .domains = BDW_DISPLAY_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+       },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = vlv_display_power_well_enable,
+       .disable = vlv_display_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = vlv_power_well_enable,
+       .disable = vlv_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well vlv_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+       {
+               .name = "display",
+               .domains = VLV_DISPLAY_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DISP2D,
+               .ops = &vlv_display_power_well_ops,
+       },
+       {
+               .name = "dpio-common",
+               .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+               .ops = &vlv_dpio_power_well_ops,
+       },
+       {
+               .name = "dpio-tx-b-01",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+       },
+       {
+               .name = "dpio-tx-b-23",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+       },
+       {
+               .name = "dpio-tx-c-01",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+       },
+       {
+               .name = "dpio-tx-c-23",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
        },
 };
 
@@ -5392,9 +5835,8 @@ static struct i915_power_well bdw_power_wells[] = {
        (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
 })
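
Only the tail of set_power_wells() is visible in this hunk; its assumed full shape, consistent with the ARRAY_SIZE() assignment above (an inference, not quoted from the patch):

#define set_power_wells(power_domains, __power_wells) ({               \
        (power_domains)->power_wells = (__power_wells);                \
        (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})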
 
-int intel_power_domains_init(struct drm_device *dev)
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
        mutex_init(&power_domains->lock);
@@ -5403,12 +5845,14 @@ int intel_power_domains_init(struct drm_device *dev)
         * The enabling order will be from lower to higher indexed wells;
         * the disabling order is reversed.
         */
-       if (IS_HASWELL(dev)) {
+       if (IS_HASWELL(dev_priv->dev)) {
                set_power_wells(power_domains, hsw_power_wells);
                hsw_pwr = power_domains;
-       } else if (IS_BROADWELL(dev)) {
+       } else if (IS_BROADWELL(dev_priv->dev)) {
                set_power_wells(power_domains, bdw_power_wells);
                hsw_pwr = power_domains;
+       } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+               set_power_wells(power_domains, vlv_power_wells);
        } else {
                set_power_wells(power_domains, i9xx_always_on_power_well);
        }
@@ -5416,47 +5860,28 @@ int intel_power_domains_init(struct drm_device *dev)
        return 0;
 }
 
-void intel_power_domains_remove(struct drm_device *dev)
+void intel_power_domains_remove(struct drm_i915_private *dev_priv)
 {
        hsw_pwr = NULL;
 }
 
-static void intel_power_domains_resume(struct drm_device *dev)
+static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;
        int i;
 
        mutex_lock(&power_domains->lock);
-       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
-               if (power_well->set)
-                       power_well->set(dev, power_well, power_well->count > 0);
-       }
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
+               power_well->ops->sync_hw(dev_priv, power_well);
        mutex_unlock(&power_domains->lock);
 }
 
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-void intel_power_domains_init_hw(struct drm_device *dev)
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        /* For now, we need the power well to be always enabled. */
-       intel_display_set_init_power(dev, true);
-       intel_power_domains_resume(dev);
-
-       if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
-               return;
-
-       /* We're taking over the BIOS, so clear any requests made by it since
-        * the driver is in charge now. */
-       if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
-               I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+       intel_display_set_init_power(dev_priv, true);
+       intel_power_domains_resume(dev_priv);
 }
 
 /* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
@@ -5733,10 +6158,9 @@ void intel_pm_setup(struct drm_device *dev)
 
        mutex_init(&dev_priv->pc8.lock);
        dev_priv->pc8.requirements_met = false;
-       dev_priv->pc8.gpu_idle = false;
        dev_priv->pc8.irqs_disabled = false;
        dev_priv->pc8.enabled = false;
-       dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+       dev_priv->pc8.disable_count = 1; /* requirements_met */
        INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
        INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
                          intel_gen6_powersave_work);
index 31b36c5ac8941e844cd9f995b0c4e575219fc8ea..859092130c180c1d3eb6889b04c7b2e7c022c858 100644 (file)
@@ -531,9 +531,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
                goto err;
        }
 
-       i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+       ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+       if (ret)
+               goto err_unref;
 
-       ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
+       ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
        if (ret)
                goto err_unref;
 
@@ -549,7 +551,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
        return 0;
 
 err_unpin:
-       i915_gem_object_unpin(ring->scratch.obj);
+       i915_gem_object_ggtt_unpin(ring->scratch.obj);
 err_unref:
        drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
@@ -569,7 +571,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
         * programmed to '1' on all products.
         *
-        * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+        * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
         */
        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@ -625,7 +627,7 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 
        if (INTEL_INFO(dev)->gen >= 5) {
                kunmap(sg_page(ring->scratch.obj->pages->sgl));
-               i915_gem_object_unpin(ring->scratch.obj);
+               i915_gem_object_ggtt_unpin(ring->scratch.obj);
        }
 
        drm_gem_object_unreference(&ring->scratch.obj->base);
@@ -1253,7 +1255,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
                return;
 
        kunmap(sg_page(obj->pages->sgl));
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;
 }
@@ -1271,12 +1273,13 @@ static int init_status_page(struct intel_ring_buffer *ring)
                goto err;
        }
 
-       i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+       ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+       if (ret)
+               goto err_unref;
 
-       ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
-       if (ret != 0) {
+       ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+       if (ret)
                goto err_unref;
-       }
 
        ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
        ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
@@ -1293,7 +1296,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
        return 0;
 
 err_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
 err_unref:
        drm_gem_object_unreference(&obj->base);
 err:
@@ -1356,7 +1359,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
        ring->obj = obj;
 
-       ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
        if (ret)
                goto err_unref;
 
@@ -1385,12 +1388,14 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        if (IS_I830(ring->dev) || IS_845G(ring->dev))
                ring->effective_size -= 128;
 
+       i915_cmd_parser_init_ring(ring);
+
        return 0;
 
 err_unmap:
        iounmap(ring->virtual_start);
 err_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
 err_unref:
        drm_gem_object_unreference(&obj->base);
        ring->obj = NULL;
@@ -1418,7 +1423,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
        iounmap(ring->virtual_start);
 
-       i915_gem_object_unpin(ring->obj);
+       i915_gem_object_ggtt_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
        ring->obj = NULL;
        ring->preallocated_lazy_request = NULL;
@@ -1430,28 +1435,16 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
        cleanup_status_page(ring);
 }
 
-static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
-{
-       int ret;
-
-       ret = i915_wait_seqno(ring, seqno);
-       if (!ret)
-               i915_gem_retire_requests_ring(ring);
-
-       return ret;
-}
-
 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 {
        struct drm_i915_gem_request *request;
-       u32 seqno = 0;
+       u32 seqno = 0, tail;
        int ret;
 
-       i915_gem_retire_requests_ring(ring);
-
        if (ring->last_retired_head != -1) {
                ring->head = ring->last_retired_head;
                ring->last_retired_head = -1;
+
                ring->space = ring_space(ring);
                if (ring->space >= n)
                        return 0;
@@ -1468,6 +1461,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
                        space += ring->size;
                if (space >= n) {
                        seqno = request->seqno;
+                       tail = request->tail;
                        break;
                }
 
@@ -1482,15 +1476,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
        if (seqno == 0)
                return -ENOSPC;
 
-       ret = intel_ring_wait_seqno(ring, seqno);
+       ret = i915_wait_seqno(ring, seqno);
        if (ret)
                return ret;
 
-       if (WARN_ON(ring->last_retired_head == -1))
-               return -ENOSPC;
-
-       ring->head = ring->last_retired_head;
-       ring->last_retired_head = -1;
+       ring->head = tail;
        ring->space = ring_space(ring);
        if (WARN_ON(ring->space < n))
                return -ENOSPC;
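
The rewrite above drops the intermediate retire pass: while scanning outstanding requests it records the winning request's tail, waits on its seqno, and reuses that tail directly as the new head rather than depending on last_retired_head being refreshed by retirement. The space test itself is the usual circular-buffer wrap-around arithmetic; a sketch of what ring_space() computes, with the reserved slack passed as a parameter standing in for I915_RING_FREE_SPACE:

/* Hedged sketch of circular-buffer free space, as used above. */
static int ring_free_space(int head, int tail, int size, int reserved)
{
        int space = head - (tail + reserved);

        if (space < 0)          /* head is behind tail: wrap around */
                space += size;
        return space;
}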
@@ -1528,7 +1518,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
                        return 0;
                }
 
-               if (dev->primary->master) {
+               if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
+                   dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
@@ -1954,7 +1945,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                        return -ENOMEM;
                }
 
-               ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
+               ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
                if (ret != 0) {
                        drm_gem_object_unreference(&obj->base);
                        DRM_ERROR("Failed to ping batch bo\n");
index 0b243ce337147d51f7e9cb4b09d774f4cc33e856..09af92099c1bbbe30ed70e8cb0647e12e9ffa4c6 100644 (file)
@@ -41,6 +41,8 @@ enum intel_ring_hangcheck_action {
        HANGCHECK_HUNG,
 };
 
+#define HANGCHECK_SCORE_RING_HUNG 31
+
 struct intel_ring_hangcheck {
        bool deadlock;
        u32 seqno;
@@ -162,6 +164,38 @@ struct  intel_ring_buffer {
                u32 gtt_offset;
                volatile u32 *cpu_page;
        } scratch;
+
+       /*
+        * Tables of commands the command parser needs to know about
+        * for this ring.
+        */
+       const struct drm_i915_cmd_table *cmd_tables;
+       int cmd_table_count;
+
+       /*
+        * Table of registers allowed in commands that read/write registers.
+        */
+       const u32 *reg_table;
+       int reg_count;
+
+       /*
+        * Table of registers allowed in commands that read/write registers, but
+        * only from the DRM master.
+        */
+       const u32 *master_reg_table;
+       int master_reg_count;
+
+       /*
+        * Returns the bitmask for the length field of the specified command.
+        * Returns 0 for an unrecognized/invalid command.
+        *
+        * If the command parser finds an entry for a command in the ring's
+        * cmd_tables, it gets the command's length based on the table entry.
+        * If not, it calls this function to determine the per-ring length field
+        * encoding for the command (i.e. certain opcode ranges use certain bits
+        * to encode the command length in the header).
+        */
+       u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
 static inline bool
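
When a header misses in cmd_tables, the parser falls back on get_cmd_length_mask() to isolate the header bits that carry the dword count. A hedged sketch of that decode step; the +2 bias is an illustrative assumption, since the real per-ring encodings live in the new i915_cmd_parser.c rather than in this header:

/* Hedged sketch: recover a command's length from its header. */
static u32 cmd_length(u32 header, u32 (*get_cmd_length_mask)(u32 cmd_header))
{
        u32 mask = get_cmd_length_mask(header);

        if (mask == 0)
                return 0;       /* unrecognized/invalid command */

        /* length field counts dwords past a fixed bias (assumed +2 here) */
        return (header & mask) + 2;
}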
index 95bdfb3c431c8467b105c616f6d5f9567505804f..9a0b71f6aed6e9f32bfcf8881392dc360e3726eb 100644 (file)
@@ -2382,24 +2382,62 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
 }
 
 static void
+intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
+{
+       struct drm_connector *drm_connector;
+       struct intel_sdvo *sdvo_encoder;
+
+       drm_connector = &intel_connector->base;
+       sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
+
+       sysfs_remove_link(&drm_connector->kdev->kobj,
+                         sdvo_encoder->ddc.dev.kobj.name);
+       intel_connector_unregister(intel_connector);
+}
+
+static int
 intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
                          struct intel_sdvo *encoder)
 {
-       drm_connector_init(encoder->base.base.dev,
-                          &connector->base.base,
+       struct drm_connector *drm_connector;
+       int ret;
+
+       drm_connector = &connector->base.base;
+       ret = drm_connector_init(encoder->base.base.dev,
+                          drm_connector,
                           &intel_sdvo_connector_funcs,
                           connector->base.base.connector_type);
+       if (ret < 0)
+               return ret;
 
-       drm_connector_helper_add(&connector->base.base,
+       drm_connector_helper_add(drm_connector,
                                 &intel_sdvo_connector_helper_funcs);
 
        connector->base.base.interlace_allowed = 1;
        connector->base.base.doublescan_allowed = 0;
        connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
        connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
+       connector->base.unregister = intel_sdvo_connector_unregister;
 
        intel_connector_attach_encoder(&connector->base, &encoder->base);
-       drm_sysfs_connector_add(&connector->base.base);
+       ret = drm_sysfs_connector_add(drm_connector);
+       if (ret < 0)
+               goto err1;
+
+       ret = sysfs_create_link(&encoder->ddc.dev.kobj,
+                               &drm_connector->kdev->kobj,
+                               encoder->ddc.dev.kobj.name);
+       if (ret < 0)
+               goto err2;
+
+       return 0;
+
+err2:
+       drm_sysfs_connector_remove(drm_connector);
+err1:
+       drm_connector_cleanup(drm_connector);
+
+       return ret;
 }
 
 static void
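
The added error paths follow the usual kernel unwind idiom: each failure label undoes exactly the steps that succeeded before it, in reverse order (err2 removes the sysfs entry registered last, err1 cleans up the connector itself). A generic sketch of that shape, with hypothetical step/undo helpers:

/* Generic unwind sketch; the step_*()/undo_*() helpers are hypothetical. */
extern int step_a(void), step_b(void), step_c(void);
extern void undo_a(void), undo_b(void);

static int setup_three(void)
{
        int ret;

        ret = step_a();
        if (ret < 0)
                return ret;     /* nothing to undo yet */

        ret = step_b();
        if (ret < 0)
                goto err_a;

        ret = step_c();
        if (ret < 0)
                goto err_b;

        return 0;

err_b:
        undo_b();               /* unwind in reverse order of setup */
err_a:
        undo_a();
        return ret;
}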
@@ -2459,7 +2497,11 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
                intel_sdvo->is_hdmi = true;
        }
 
-       intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+       if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+               kfree(intel_sdvo_connector);
+               return false;
+       }
+
        if (intel_sdvo->is_hdmi)
                intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
 
@@ -2490,7 +2532,10 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 
        intel_sdvo->is_tv = true;
 
-       intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+       if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+               kfree(intel_sdvo_connector);
+               return false;
+       }
 
        if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
                goto err;
@@ -2534,8 +2579,11 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
        }
 
-       intel_sdvo_connector_init(intel_sdvo_connector,
-                                 intel_sdvo);
+       if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+               kfree(intel_sdvo_connector);
+               return false;
+       }
+
        return true;
 }
 
@@ -2566,7 +2614,11 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
        }
 
-       intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+       if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+               kfree(intel_sdvo_connector);
+               return false;
+       }
+
        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
                goto err;
 
@@ -2980,7 +3032,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
         * simplistic anyway to express such constraints, so just give up on
         * cloning for SDVO encoders.
         */
-       intel_sdvo->base.cloneable = false;
+       intel_sdvo->base.cloneable = 0;
 
        intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
index 716a3c9c0751c18927cb1b10633b6effff584e7b..336ae6c602f2a6855e58c782a68debf350f49807 100644 (file)
@@ -124,9 +124,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
        crtc_w--;
        crtc_h--;
 
-       I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
-       I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
-
        linear_offset = y * fb->pitches[0] + x * pixel_size;
        sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
                                                        obj->tiling_mode,
@@ -134,6 +131,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
                                                        fb->pitches[0]);
        linear_offset -= sprsurf_offset;
 
+       I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
+       I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+
        if (obj->tiling_mode != I915_TILING_NONE)
                I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
        else
@@ -293,15 +293,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc_w != src_w || crtc_h != src_h)
                sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 
-       I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
-       I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
        linear_offset = y * fb->pitches[0] + x * pixel_size;
        sprsurf_offset =
                intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
                                               pixel_size, fb->pitches[0]);
        linear_offset -= sprsurf_offset;
 
+       I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+       I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+
        /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
         * register */
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -472,15 +472,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc_w != src_w || crtc_h != src_h)
                dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
-       I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
-       I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
        linear_offset = y * fb->pitches[0] + x * pixel_size;
        dvssurf_offset =
                intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
                                               pixel_size, fb->pitches[0]);
        linear_offset -= dvssurf_offset;
 
+       I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+       I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+
        if (obj->tiling_mode != I915_TILING_NONE)
                I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
        else
index 22cf0f4ba24871ad2697306aaec37a577496837f..5be4ab218054dedc2af9d5367ca829dda568da5d 100644 (file)
@@ -1189,8 +1189,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                i915_disable_pipestat(dev_priv, 0,
-                                     PIPE_HOTPLUG_INTERRUPT_ENABLE |
-                                     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+                                     PIPE_HOTPLUG_INTERRUPT_STATUS |
+                                     PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
 
@@ -1266,8 +1266,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                i915_enable_pipestat(dev_priv, 0,
-                                    PIPE_HOTPLUG_INTERRUPT_ENABLE |
-                                    PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+                                    PIPE_HOTPLUG_INTERRUPT_STATUS |
+                                    PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
 
@@ -1634,13 +1634,13 @@ intel_tv_init(struct drm_device *dev)
        intel_encoder->disable = intel_disable_tv;
        intel_encoder->get_hw_state = intel_tv_get_hw_state;
        intel_connector->get_hw_state = intel_connector_get_hw_state;
+       intel_connector->unregister = intel_connector_unregister;
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        intel_encoder->type = INTEL_OUTPUT_TVOUT;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
-       intel_encoder->cloneable = false;
+       intel_encoder->cloneable = 0;
        intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
-       intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
        intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
 
        /* BIOS margin values */
index 87df68f5f504b5a2dd352f503d1af4dbf06c30ba..7861d97600e1819ecbd75aaf6f278e7fe8424d49 100644 (file)
 
 #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
 
+static void
+assert_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+       WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+            "Device suspended\n");
+}
 
 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 {
@@ -83,14 +89,14 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
        __gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
-static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
 {
        __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);
 }
 
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
 {
        u32 forcewake_ack;
@@ -136,14 +142,16 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
        gen6_gt_check_fifodbg(dev_priv);
 }
 
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
 {
        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);
-       gen6_gt_check_fifodbg(dev_priv);
+
+       if (IS_GEN7(dev_priv->dev))
+               gen6_gt_check_fifodbg(dev_priv);
 }
 
 static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -251,16 +259,16 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv,
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-       if (FORCEWAKE_RENDER & fw_engine) {
-               if (dev_priv->uncore.fw_rendercount++ == 0)
-                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
-                                                       FORCEWAKE_RENDER);
-       }
-       if (FORCEWAKE_MEDIA & fw_engine) {
-               if (dev_priv->uncore.fw_mediacount++ == 0)
-                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
-                                                       FORCEWAKE_MEDIA);
-       }
+
+       if (fw_engine & FORCEWAKE_RENDER &&
+           dev_priv->uncore.fw_rendercount++ != 0)
+               fw_engine &= ~FORCEWAKE_RENDER;
+       if (fw_engine & FORCEWAKE_MEDIA &&
+           dev_priv->uncore.fw_mediacount++ != 0)
+               fw_engine &= ~FORCEWAKE_MEDIA;
+
+       if (fw_engine)
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -272,46 +280,45 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv,
 
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-       if (FORCEWAKE_RENDER & fw_engine) {
-               WARN_ON(dev_priv->uncore.fw_rendercount == 0);
-               if (--dev_priv->uncore.fw_rendercount == 0)
-                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
-                                                       FORCEWAKE_RENDER);
-       }
+       if (fw_engine & FORCEWAKE_RENDER &&
+           --dev_priv->uncore.fw_rendercount != 0)
+               fw_engine &= ~FORCEWAKE_RENDER;
+       if (fw_engine & FORCEWAKE_MEDIA &&
+           --dev_priv->uncore.fw_mediacount != 0)
+               fw_engine &= ~FORCEWAKE_MEDIA;
 
-       if (FORCEWAKE_MEDIA & fw_engine) {
-               WARN_ON(dev_priv->uncore.fw_mediacount == 0);
-               if (--dev_priv->uncore.fw_mediacount == 0)
-                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
-                                                       FORCEWAKE_MEDIA);
-       }
+       if (fw_engine)
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
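
The reworked VLV get/put avoid one hardware callback per engine: any engine bit whose reference count shows the hardware already matches the requested state is masked out, and a single call covers whatever bits remain. A standalone sketch of the trick, with local stand-in bit definitions:

#define FW_RENDER (1 << 0)      /* stand-in for FORCEWAKE_RENDER */
#define FW_MEDIA  (1 << 1)      /* stand-in for FORCEWAKE_MEDIA */

/* Hedged sketch: only 0 -> 1 refcount transitions reach the hardware. */
static void fw_get(unsigned int *rendercount, unsigned int *mediacount,
                   int fw_engine, void (*hw_get)(int engines))
{
        if (fw_engine & FW_RENDER && (*rendercount)++ != 0)
                fw_engine &= ~FW_RENDER;        /* render already held */
        if (fw_engine & FW_MEDIA && (*mediacount)++ != 0)
                fw_engine &= ~FW_MEDIA;         /* media already held */

        if (fw_engine)
                hw_get(fw_engine);              /* one call for the rest */
}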
 
-static void gen6_force_wake_work(struct work_struct *work)
+static void gen6_force_wake_timer(unsigned long arg)
 {
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
+       struct drm_i915_private *dev_priv = (void *)arg;
        unsigned long irqflags;
 
+       assert_device_not_suspended(dev_priv);
+
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+       intel_runtime_pm_put(dev_priv);
 }
 
 static void intel_uncore_forcewake_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (IS_VALLEYVIEW(dev)) {
+       if (IS_VALLEYVIEW(dev))
                vlv_force_wake_reset(dev_priv);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       else if (IS_GEN6(dev) || IS_GEN7(dev))
                __gen6_gt_force_wake_reset(dev_priv);
-               if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-                       __gen6_gt_force_wake_mt_reset(dev_priv);
-       }
+
+       if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
+               __gen7_gt_force_wake_mt_reset(dev_priv);
 }
 
 void intel_uncore_early_sanitize(struct drm_device *dev)
@@ -354,7 +361,9 @@ void intel_uncore_sanitize(struct drm_device *dev)
                mutex_lock(&dev_priv->rps.hw_lock);
                reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
 
-               if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
+               if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) |
+                              PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) |
+                              PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)))
                        vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
 
                mutex_unlock(&dev_priv->rps.hw_lock);
@@ -393,25 +402,38 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
 {
        unsigned long irqflags;
+       bool delayed = false;
 
        if (!dev_priv->uncore.funcs.force_wake_put)
                return;
 
        /* Redirect to VLV specific routine */
-       if (IS_VALLEYVIEW(dev_priv->dev))
-               return vlv_force_wake_put(dev_priv, fw_engine);
+       if (IS_VALLEYVIEW(dev_priv->dev)) {
+               vlv_force_wake_put(dev_priv, fw_engine);
+               goto out;
+       }
 
 
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0) {
                dev_priv->uncore.forcewake_count++;
-               mod_delayed_work(dev_priv->wq,
-                                &dev_priv->uncore.force_wake_work,
-                                1);
+               delayed = true;
+               mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
+                                jiffies + 1);
        }
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
-       intel_runtime_pm_put(dev_priv);
+out:
+       if (!delayed)
+               intel_runtime_pm_put(dev_priv);
+}
+
+void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
+{
+       if (!dev_priv->uncore.funcs.force_wake_get)
+               return;
+
+       WARN_ON(dev_priv->uncore.forcewake_count > 0);
 }
 
 /* We give fast paths for the really cool registers */
@@ -446,16 +468,10 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
        }
 }
 
-static void
-assert_device_not_suspended(struct drm_i915_private *dev_priv)
-{
-       WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
-            "Device suspended\n");
-}
-
 #define REG_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
+       assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
 #define REG_READ_FOOTER \
@@ -484,17 +500,15 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 static u##x \
 gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
-       if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-               if (dev_priv->uncore.forcewake_count == 0) \
-                       dev_priv->uncore.funcs.force_wake_get(dev_priv, \
-                                                       FORCEWAKE_ALL); \
-               val = __raw_i915_read##x(dev_priv, reg); \
-               if (dev_priv->uncore.forcewake_count == 0) \
-                       dev_priv->uncore.funcs.force_wake_put(dev_priv, \
-                                                       FORCEWAKE_ALL); \
-       } else { \
-               val = __raw_i915_read##x(dev_priv, reg); \
+       if (dev_priv->uncore.forcewake_count == 0 && \
+           NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+                                                     FORCEWAKE_ALL); \
+               dev_priv->uncore.forcewake_count++; \
+               mod_timer_pinned(&dev_priv->uncore.force_wake_timer, \
+                                jiffies + 1); \
        } \
+       val = __raw_i915_read##x(dev_priv, reg); \
        REG_READ_FOOTER; \
 }
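
Rather than bracketing every read with a get/put pair, the gen6 read path now takes the forcewake reference only when the count is zero, bumps the count, and arms the one-jiffy pinned timer (gen6_force_wake_timer above) to drop it again, so a burst of reads pays for a single forcewake handshake. A sketch of the pattern with hypothetical helpers:

/* Hedged sketch of lazy forcewake release; all helpers are hypothetical. */
extern int needs_force_wake(unsigned int reg);
extern void force_wake_get(void);
extern void arm_release_timer(void);            /* later drops wake_count */
extern unsigned int raw_read(unsigned int reg);

static unsigned int wake_count;

static unsigned int lazy_read(unsigned int reg)
{
        if (wake_count == 0 && needs_force_wake(reg)) {
                force_wake_get();
                wake_count++;           /* keeps hardware awake for now */
                arm_release_timer();    /* release happens off this path */
        }
        return raw_read(reg);
}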
 
@@ -502,27 +516,19 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 static u##x \
 vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        unsigned fwengine = 0; \
-       unsigned *fwcount; \
        REG_READ_HEADER(x); \
-       if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) {   \
-               fwengine = FORCEWAKE_RENDER;            \
-               fwcount = &dev_priv->uncore.fw_rendercount;    \
-       }                                               \
-       else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) {       \
-               fwengine = FORCEWAKE_MEDIA;             \
-               fwcount = &dev_priv->uncore.fw_mediacount;     \
+       if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
+               if (dev_priv->uncore.fw_rendercount == 0) \
+                       fwengine = FORCEWAKE_RENDER; \
+       } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
+               if (dev_priv->uncore.fw_mediacount == 0) \
+                       fwengine = FORCEWAKE_MEDIA; \
        }  \
-       if (fwengine != 0) {            \
-               if ((*fwcount)++ == 0) \
-                       (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
-                                                               fwengine); \
-               val = __raw_i915_read##x(dev_priv, reg); \
-               if (--(*fwcount) == 0) \
-                       (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
-                                                       fwengine); \
-       } else { \
-               val = __raw_i915_read##x(dev_priv, reg); \
-       } \
+       if (fwengine) \
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+       val = __raw_i915_read##x(dev_priv, reg); \
+       if (fwengine) \
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
        REG_READ_FOOTER; \
 }
 
@@ -554,6 +560,7 @@ __gen4_read(64)
 #define REG_WRITE_HEADER \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+       assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
 #define REG_WRITE_FOOTER \
@@ -584,7 +591,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
-       assert_device_not_suspended(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
@@ -600,7 +606,6 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
-       assert_device_not_suspended(dev_priv); \
        hsw_unclaimed_reg_clear(dev_priv, reg); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
@@ -634,16 +639,17 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
 #define __gen8_write(x) \
 static void \
 gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
-       bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
        REG_WRITE_HEADER; \
-       if (__needs_put) { \
-               dev_priv->uncore.funcs.force_wake_get(dev_priv, \
-                                                       FORCEWAKE_ALL); \
-       } \
-       __raw_i915_write##x(dev_priv, reg, val); \
-       if (__needs_put) { \
-               dev_priv->uncore.funcs.force_wake_put(dev_priv, \
-                                                       FORCEWAKE_ALL); \
+       if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
+               if (dev_priv->uncore.forcewake_count == 0) \
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+                                                             FORCEWAKE_ALL); \
+               __raw_i915_write##x(dev_priv, reg, val); \
+               if (dev_priv->uncore.forcewake_count == 0) \
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+                                                             FORCEWAKE_ALL); \
+       } else { \
+               __raw_i915_write##x(dev_priv, reg, val); \
        } \
        REG_WRITE_FOOTER; \
 }
@@ -681,15 +687,15 @@ void intel_uncore_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
-                         gen6_force_wake_work);
+       setup_timer(&dev_priv->uncore.force_wake_timer,
+                   gen6_force_wake_timer, (unsigned long)dev_priv);
 
        if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
        } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
-               dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
-               dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+               dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
+               dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
        } else if (IS_IVYBRIDGE(dev)) {
                u32 ecobus;
 
@@ -703,16 +709,16 @@ void intel_uncore_init(struct drm_device *dev)
                 * forcewake being disabled.
                 */
                mutex_lock(&dev->struct_mutex);
-               __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
+               __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-               __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
+               __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);
 
                if (ecobus & FORCEWAKE_MT_ENABLE) {
                        dev_priv->uncore.funcs.force_wake_get =
-                               __gen6_gt_force_wake_mt_get;
+                               __gen7_gt_force_wake_mt_get;
                        dev_priv->uncore.funcs.force_wake_put =
-                               __gen6_gt_force_wake_mt_put;
+                               __gen7_gt_force_wake_mt_put;
                } else {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
@@ -794,10 +800,11 @@ void intel_uncore_fini(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       flush_delayed_work(&dev_priv->uncore.force_wake_work);
+       del_timer_sync(&dev_priv->uncore.force_wake_timer);
 
        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev);
+       intel_uncore_forcewake_reset(dev);
 }
 
 static const struct register_whitelist {
@@ -852,6 +859,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reset_stats *args = data;
        struct i915_ctx_hang_stats *hs;
+       struct i915_hw_context *ctx;
        int ret;
 
        if (args->flags || args->pad)
@@ -864,11 +872,12 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
        if (ret)
                return ret;
 
-       hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
-       if (IS_ERR(hs)) {
+       ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
+       if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
-               return PTR_ERR(hs);
+               return PTR_ERR(ctx);
        }
+       hs = &ctx->hang_stats;
 
        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
@@ -945,6 +954,7 @@ static int gen6_do_reset(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int     ret;
        unsigned long irqflags;
+       u32 fw_engine = 0;
 
        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly
@@ -964,14 +974,25 @@ static int gen6_do_reset(struct drm_device *dev)
 
        intel_uncore_forcewake_reset(dev);
 
-       /* If reset with a user forcewake, try to restore, otherwise turn it off */
-       if (dev_priv->uncore.forcewake_count)
-               dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
-       else
-               dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+       /* If reset with a user forcewake, try to restore */
+       if (IS_VALLEYVIEW(dev)) {
+               if (dev_priv->uncore.fw_rendercount)
+                       fw_engine |= FORCEWAKE_RENDER;
+
+               if (dev_priv->uncore.fw_mediacount)
+                       fw_engine |= FORCEWAKE_MEDIA;
+       } else {
+               if (dev_priv->uncore.forcewake_count)
+                       fw_engine = FORCEWAKE_ALL;
+       }
 
-       /* Restore fifo count */
-       dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+       if (fw_engine)
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
+
+       if (IS_GEN6(dev) || IS_GEN7(dev))
+               dev_priv->uncore.fifo_count =
+                       __raw_i915_read32(dev_priv, GTFIFOCTL) &
+                       GT_FIFO_FREE_ENTRIES_MASK;
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
        return ret;
index 8f3dee09757999c7c959e284250142c143d37d7d..f7646548660d7e98e160d2dac40d1f12b310a48d 100644 (file)
@@ -994,6 +994,7 @@ extern void drm_encoder_cleanup(struct drm_encoder *encoder);
 
 extern const char *drm_get_connector_name(const struct drm_connector *connector);
 extern const char *drm_get_connector_status_name(enum drm_connector_status status);
+extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
 extern const char *drm_get_dpms_name(int val);
 extern const char *drm_get_dvi_i_subconnector_name(int val);
 extern const char *drm_get_dvi_i_select_name(int val);
index 1d09050a8c001749ba26087581b93cb2de1f9153..73c3d1f2b20d58bdfc6c2d08558829a66a647430 100644 (file)
 
 #define DP_TEST_PATTERN                            0x221
 
+#define DP_TEST_CRC_R_CR                   0x240
+#define DP_TEST_CRC_G_Y                            0x242
+#define DP_TEST_CRC_B_CB                   0x244
+
+#define DP_TEST_SINK_MISC                  0x246
+#define DP_TEST_CRC_SUPPORTED              (1 << 5)
+
 #define DP_TEST_RESPONSE                   0x260
 # define DP_TEST_ACK                       (1 << 0)
 # define DP_TEST_NAK                       (1 << 1)
 # define DP_TEST_EDID_CHECKSUM_WRITE       (1 << 2)
 
+#define DP_TEST_SINK                       0x270
+#define DP_TEST_SINK_START         (1 << 0)
+
 #define DP_SOURCE_OUI                      0x300
 #define DP_SINK_OUI                        0x400
 #define DP_BRANCH_OUI                      0x500
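
The new DPCD offsets expose the sink's frame-CRC test engine: DP_TEST_SINK_MISC advertises support, DP_TEST_SINK_START kicks it off, and offsets 0x240..0x245 hold one 16-bit CRC per colour component. A hedged sketch of how a source might use them; the dpcd_readb()/dpcd_writeb()/dpcd_read() transport helpers are hypothetical stand-ins for a driver's AUX-channel accessors:

/* Hypothetical AUX transport helpers (assumed, not part of this patch). */
extern int dpcd_readb(unsigned int offset, unsigned char *val);
extern int dpcd_writeb(unsigned int offset, unsigned char val);
extern int dpcd_read(unsigned int offset, unsigned char *buf, int len);

static int read_sink_crc(unsigned short crc[3])
{
        unsigned char buf[6], misc;

        if (dpcd_readb(DP_TEST_SINK_MISC, &misc) < 0 ||
            !(misc & DP_TEST_CRC_SUPPORTED))
                return -1;              /* sink has no CRC engine */

        dpcd_writeb(DP_TEST_SINK, DP_TEST_SINK_START);
        /* ... wait for at least one full frame to be sent ... */
        dpcd_read(DP_TEST_CRC_R_CR, buf, sizeof(buf));  /* 0x240..0x245 */

        crc[0] = buf[0] | (buf[1] << 8);        /* R / Cr */
        crc[1] = buf[2] | (buf[3] << 8);        /* G / Y  */
        crc[2] = buf[4] | (buf[5] << 8);        /* B / Cb */
        return 0;
}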
index 471f276ce8f741638ad6ced787aa7f943457e751..2d659dc39972f9a30dce9d9b1bfd6cdd8a9e9606 100644 (file)
@@ -121,5 +121,11 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
 int drm_fb_helper_debug_enter(struct fb_info *info);
 int drm_fb_helper_debug_leave(struct fb_info *info);
+struct drm_display_mode *
+drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
+                       int width, int height);
+struct drm_display_mode *
+drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+                     int width, int height);
 
 #endif