Merge remote-tracking branch 'airlied/drm-fixes' into drm-intel-next-queued
author    Daniel Vetter <daniel.vetter@ffwll.ch>
          Fri, 10 Feb 2012 15:52:55 +0000 (16:52 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
          Fri, 10 Feb 2012 16:14:49 +0000 (17:14 +0100)
Back-merge from drm-fixes into drm-intel-next to sort out two things:

- interlaced support: -fixes contains a bugfix to correctly clear the
  interlaced configuration bits when the BIOS sets up an interlaced
  mode and we want to set up a progressive mode (current kernels don't
  support interlaced). The actual feature work to support interlaced
  depends upon (and conflicts with) this bugfix.

- forcewake voodoo to work around missed IRQ issues: -fixes only
  enabled this for Ivybridge, but some recent bug reports indicate that
  we need it on Sandybridge, too, albeit in a slightly different
  flavour and with other fixes and reworks on top. Additionally, some
  forcewake cleanup patches heading to -next would conflict with
  current -fixes. Minimal sketches of both pieces follow below.

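For reference, hedged sketches of the two pieces being back-merged.
First, the interlaced cleanup from -fixes masks out the whole interlace
field of PIPECONF before forcing progressive scan-out. This is a
simplified sketch, not the verbatim patch; the register and mask names
are the ones used in i915_reg.h of this period:

        u32 pipeconf = I915_READ(PIPECONF(pipe));

        /* The BIOS may have left any of the interlaced encodings
         * enabled, so clear the complete field instead of individual
         * bits before selecting progressive scan-out. */
        pipeconf &= ~PIPECONF_INTERLACE_MASK;
        pipeconf |= PIPECONF_PROGRESSIVE;
        I915_WRITE(PIPECONF(pipe), pipeconf);

Second, the forcewake workaround keeps the GT forced awake for as long
as a ring's user interrupt is unmasked, so the GT cannot power down
between the seqno write and the IRQ. Again a condensed sketch around
the real gen6_gt_force_wake_get()/put() entry points; the masking code
is elided:

        /* first waiter enables the ring's user interrupt: */
        gen6_gt_force_wake_get(dev_priv);
        /* ... unmask the ring's user interrupt in its IMR ... */

        /* last waiter disables it again: */
        /* ... remask the interrupt ... */
        gen6_gt_force_wake_put(dev_priv);
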
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
28 files changed:
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-gtt.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_mem.c [deleted file]
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sprite.c
include/drm/i915_drm.h
include/drm/intel-gtt.h

diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index b427711be4be385c2704d4c971dc5cd512ed85c4..962e75dc47810a0c7d0eac323d596b2be507189c 100644
@@ -850,6 +850,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
        .subvendor      = PCI_ANY_ID,                   \
        .subdevice      = PCI_ANY_ID,                   \
        }
+       ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */
        ID(PCI_DEVICE_ID_INTEL_82443LX_0),
        ID(PCI_DEVICE_ID_INTEL_82443BX_0),
        ID(PCI_DEVICE_ID_INTEL_82443GX_0),
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c92424ca1a55370dcdaa7798cb6eb1ae6fbdd90c..5cf47ac2d401d2551661355d2d5fbaccfccdb72e 100644
@@ -76,7 +76,6 @@ static struct _intel_private {
        struct resource ifp_resource;
        int resource_valid;
        struct page *scratch_page;
-       dma_addr_t scratch_page_dma;
 } intel_private;
 
 #define INTEL_GTT_GEN  intel_private.driver->gen
@@ -306,9 +305,9 @@ static int intel_gtt_setup_scratch_page(void)
                if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
                        return -EINVAL;
 
-               intel_private.scratch_page_dma = dma_addr;
+               intel_private.base.scratch_page_dma = dma_addr;
        } else
-               intel_private.scratch_page_dma = page_to_phys(page);
+               intel_private.base.scratch_page_dma = page_to_phys(page);
 
        intel_private.scratch_page = page;
 
@@ -631,7 +630,7 @@ static unsigned int intel_gtt_mappable_entries(void)
 static void intel_gtt_teardown_scratch_page(void)
 {
        set_pages_wb(intel_private.scratch_page, 1);
-       pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+       pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        put_page(intel_private.scratch_page);
        __free_page(intel_private.scratch_page);
@@ -681,6 +680,7 @@ static int intel_gtt_init(void)
                iounmap(intel_private.registers);
                return -ENOMEM;
        }
+       intel_private.base.gtt = intel_private.gtt;
 
        global_cache_flush();   /* FIXME: ? */
 
@@ -975,7 +975,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
        unsigned int i;
 
        for (i = first_entry; i < (first_entry + num_entries); i++) {
-               intel_private.driver->write_entry(intel_private.scratch_page_dma,
+               intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
                                                  i, 0);
        }
        readl(intel_private.gtt+i-1);
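
The point of moving scratch_page_dma (and the gtt pointer) into
intel_private.base is that base is the struct intel_gtt that intel-gtt.c
exposes via intel_gtt_get(); the aliasing-PPGTT code further down reuses
both fields. A hedged consumer sketch (ppgtt and gtt_base are
illustrative names, not from this diff):

        const struct intel_gtt *info = intel_gtt_get();

        /* reuse the chipset scratch page for unbound PPGTT entries */
        ppgtt->scratch_page_dma_addr = info->scratch_page_dma;
        /* and pick up the CPU mapping of the global GTT PTE array */
        gtt_base = info->gtt;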
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 956fd38d7c9ed6da526479cf8b1d91ad2038c22e..2300ab1a2a776beca1098bda07fc812c191d99b4 100644
@@ -37,6 +37,7 @@
 #include "drm_core.h"
 
 #include "linux/pci.h"
+#include "linux/export.h"
 
 /**
  * Get the bus id.
@@ -346,3 +347,4 @@ int drm_noop(struct drm_device *dev, void *data,
        DRM_DEBUG("\n");
        return 0;
 }
+EXPORT_SYMBOL(drm_noop);
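
Exporting drm_noop gives drivers a shared "accept and ignore" handler
for retired ioctls. The i915_dma.c hunk below uses exactly this to stub
out the old i915_mem heap ioctls while keeping their table slots valid
for ancient DRI1 userspace:

        DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),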
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 808b255d7fc68a2b967470f5656a1bbbc0b7768c..ce7fc77678b4ce00e6eae112166af5773c44bb0a 100644
@@ -3,7 +3,7 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+i915-y := i915_drv.o i915_dma.o i915_irq.o \
          i915_debugfs.o \
           i915_suspend.o \
          i915_gem.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index deaa657292b45b910a81cd9a018baf81c1fce6dc..ae73288a9699cbb7c02915680e656fced951842c 100644
@@ -83,6 +83,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
        B(supports_tv);
        B(has_bsd_ring);
        B(has_blt_ring);
+       B(has_llc);
 #undef B
 
        return 0;
@@ -563,45 +564,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
        return 0;
 }
 
-static void i915_dump_object(struct seq_file *m,
-                            struct io_mapping *mapping,
-                            struct drm_i915_gem_object *obj)
-{
-       int page, page_count, i;
-
-       page_count = obj->base.size / PAGE_SIZE;
-       for (page = 0; page < page_count; page++) {
-               u32 *mem = io_mapping_map_wc(mapping,
-                                            obj->gtt_offset + page * PAGE_SIZE);
-               for (i = 0; i < PAGE_SIZE; i += 4)
-                       seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
-               io_mapping_unmap(mem);
-       }
-}
-
-static int i915_batchbuffer_info(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-               if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
-                   seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
-                   i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
-               }
-       }
-
-       mutex_unlock(&dev->struct_mutex);
-       return 0;
-}
-
 static int i915_ringbuffer_data(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -668,9 +630,9 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 static const char *ring_str(int ring)
 {
        switch (ring) {
-       case RING_RENDER: return " render";
-       case RING_BSD: return " bsd";
-       case RING_BLT: return " blt";
+       case RCS: return "render";
+       case VCS: return "bsd";
+       case BCS: return "blt";
        default: return "";
        }
 }
@@ -713,7 +675,7 @@ static void print_error_buffers(struct seq_file *m,
        seq_printf(m, "%s [%d]:\n", name, count);
 
        while (count--) {
-               seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
+               seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
                           err->gtt_offset,
                           err->size,
                           err->read_domains,
@@ -723,6 +685,7 @@ static void print_error_buffers(struct seq_file *m,
                           tiling_flag(err->tiling),
                           dirty_flag(err->dirty),
                           purgeable_flag(err->purgeable),
+                          err->ring != -1 ? " " : "",
                           ring_str(err->ring),
                           cache_level_str(err->cache_level));
 
@@ -736,6 +699,38 @@ static void print_error_buffers(struct seq_file *m,
        }
 }
 
+static void i915_ring_error_state(struct seq_file *m,
+                                 struct drm_device *dev,
+                                 struct drm_i915_error_state *error,
+                                 unsigned ring)
+{
+       seq_printf(m, "%s command stream:\n", ring_str(ring));
+       seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
+       seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
+       seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
+       seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
+       seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
+       seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
+       if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
+               seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
+               seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
+       }
+       if (INTEL_INFO(dev)->gen >= 4)
+               seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+       seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
+       if (INTEL_INFO(dev)->gen >= 6) {
+               seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
+               seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+               seq_printf(m, "  SYNC_0: 0x%08x\n",
+                          error->semaphore_mboxes[ring][0]);
+               seq_printf(m, "  SYNC_1: 0x%08x\n",
+                          error->semaphore_mboxes[ring][1]);
+       }
+       seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
+       seq_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+       seq_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+}
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -758,35 +753,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+
+       for (i = 0; i < dev_priv->num_fence_regs; i++)
+               seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
+
        if (INTEL_INFO(dev)->gen >= 6) {
                seq_printf(m, "ERROR: 0x%08x\n", error->error);
-               seq_printf(m, "Blitter command stream:\n");
-               seq_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
-               seq_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
-               seq_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
-               seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
-               seq_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
-               seq_printf(m, "Video (BSD) command stream:\n");
-               seq_printf(m, "  ACTHD:    0x%08x\n", error->vcs_acthd);
-               seq_printf(m, "  IPEIR:    0x%08x\n", error->vcs_ipeir);
-               seq_printf(m, "  IPEHR:    0x%08x\n", error->vcs_ipehr);
-               seq_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
-               seq_printf(m, "  seqno:    0x%08x\n", error->vcs_seqno);
-       }
-       seq_printf(m, "Render command stream:\n");
-       seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
-       seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
-       seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
-       seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
-       if (INTEL_INFO(dev)->gen >= 4) {
-               seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
-               seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
+               seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
        }
-       seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
-       seq_printf(m, "  seqno: 0x%08x\n", error->seqno);
 
-       for (i = 0; i < dev_priv->num_fence_regs; i++)
-               seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
+       i915_ring_error_state(m, dev, error, RCS);
+       if (HAS_BLT(dev))
+               i915_ring_error_state(m, dev, error, BCS);
+       if (HAS_BSD(dev))
+               i915_ring_error_state(m, dev, error, VCS);
 
        if (error->active_bo)
                print_error_buffers(m, "Active",
@@ -1414,9 +1394,108 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
        return 0;
 }
 
+static const char *swizzle_string(unsigned swizzle)
+{
+       switch (swizzle) {
+       case I915_BIT_6_SWIZZLE_NONE:
+               return "none";
+       case I915_BIT_6_SWIZZLE_9:
+               return "bit9";
+       case I915_BIT_6_SWIZZLE_9_10:
+               return "bit9/bit10";
+       case I915_BIT_6_SWIZZLE_9_11:
+               return "bit9/bit11";
+       case I915_BIT_6_SWIZZLE_9_10_11:
+               return "bit9/bit10/bit11";
+       case I915_BIT_6_SWIZZLE_9_17:
+               return "bit9/bit17";
+       case I915_BIT_6_SWIZZLE_9_10_17:
+               return "bit9/bit10/bit17";
+       case I915_BIT_6_SWIZZLE_UNKNOWN:
+               return "unkown";
+       }
+
+       return "bug";
+}
+
+static int i915_swizzle_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
+                  swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+       seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
+                  swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+
+       if (IS_GEN3(dev) || IS_GEN4(dev)) {
+               seq_printf(m, "DDC = 0x%08x\n",
+                          I915_READ(DCC));
+               seq_printf(m, "C0DRB3 = 0x%04x\n",
+                          I915_READ16(C0DRB3));
+               seq_printf(m, "C1DRB3 = 0x%04x\n",
+                          I915_READ16(C1DRB3));
+       } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
+                          I915_READ(MAD_DIMM_C0));
+               seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
+                          I915_READ(MAD_DIMM_C1));
+               seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
+                          I915_READ(MAD_DIMM_C2));
+               seq_printf(m, "TILECTL = 0x%08x\n",
+                          I915_READ(TILECTL));
+               seq_printf(m, "ARB_MODE = 0x%08x\n",
+                          I915_READ(ARB_MODE));
+               seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
+                          I915_READ(DISP_ARB_CTL));
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+static int i915_ppgtt_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int i, ret;
+
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       if (INTEL_INFO(dev)->gen == 6)
+               seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
+
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               ring = &dev_priv->ring[i];
+
+               seq_printf(m, "%s\n", ring->name);
+               if (INTEL_INFO(dev)->gen == 7)
+                       seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
+               seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
+               seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
+               seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+       }
+       if (dev_priv->mm.aliasing_ppgtt) {
+               struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+               seq_printf(m, "aliasing PPGTT:\n");
+               seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+       }
+       seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
 static int
-i915_wedged_open(struct inode *inode,
-                struct file *filp)
+i915_debugfs_common_open(struct inode *inode,
+                        struct file *filp)
 {
        filp->private_data = inode->i_private;
        return 0;
@@ -1472,20 +1551,12 @@ i915_wedged_write(struct file *filp,
 
 static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
-       .open = i915_wedged_open,
+       .open = i915_debugfs_common_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
        .llseek = default_llseek,
 };
 
-static int
-i915_max_freq_open(struct inode *inode,
-                  struct file *filp)
-{
-       filp->private_data = inode->i_private;
-       return 0;
-}
-
 static ssize_t
 i915_max_freq_read(struct file *filp,
                   char __user *ubuf,
@@ -1542,20 +1613,12 @@ i915_max_freq_write(struct file *filp,
 
 static const struct file_operations i915_max_freq_fops = {
        .owner = THIS_MODULE,
-       .open = i915_max_freq_open,
+       .open = i915_debugfs_common_open,
        .read = i915_max_freq_read,
        .write = i915_max_freq_write,
        .llseek = default_llseek,
 };
 
-static int
-i915_cache_sharing_open(struct inode *inode,
-                  struct file *filp)
-{
-       filp->private_data = inode->i_private;
-       return 0;
-}
-
 static ssize_t
 i915_cache_sharing_read(struct file *filp,
                   char __user *ubuf,
@@ -1621,7 +1684,7 @@ i915_cache_sharing_write(struct file *filp,
 
 static const struct file_operations i915_cache_sharing_fops = {
        .owner = THIS_MODULE,
-       .open = i915_cache_sharing_open,
+       .open = i915_debugfs_common_open,
        .read = i915_cache_sharing_read,
        .write = i915_cache_sharing_write,
        .llseek = default_llseek,
@@ -1653,21 +1716,6 @@ drm_add_fake_info_node(struct drm_minor *minor,
        return 0;
 }
 
-static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
-{
-       struct drm_device *dev = minor->dev;
-       struct dentry *ent;
-
-       ent = debugfs_create_file("i915_wedged",
-                                 S_IRUGO | S_IWUSR,
-                                 root, dev,
-                                 &i915_wedged_fops);
-       if (IS_ERR(ent))
-               return PTR_ERR(ent);
-
-       return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
-}
-
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
        struct drm_device *dev = inode->i_private;
@@ -1729,34 +1777,22 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
        return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
 }
 
-static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
+static int i915_debugfs_create(struct dentry *root,
+                              struct drm_minor *minor,
+                              const char *name,
+                              const struct file_operations *fops)
 {
        struct drm_device *dev = minor->dev;
        struct dentry *ent;
 
-       ent = debugfs_create_file("i915_max_freq",
+       ent = debugfs_create_file(name,
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
-                                 &i915_max_freq_fops);
+                                 fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);
 
-       return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
-}
-
-static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
-{
-       struct drm_device *dev = minor->dev;
-       struct dentry *ent;
-
-       ent = debugfs_create_file("i915_cache_sharing",
-                                 S_IRUGO | S_IWUSR,
-                                 root, dev,
-                                 &i915_cache_sharing_fops);
-       if (IS_ERR(ent))
-               return PTR_ERR(ent);
-
-       return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
+       return drm_add_fake_info_node(minor, ent, fops);
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
@@ -1782,7 +1818,6 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
        {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
        {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
-       {"i915_batchbuffers", i915_batchbuffer_info, 0},
        {"i915_error_state", i915_error_state, 0},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
@@ -1798,6 +1833,8 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
        {"i915_context_status", i915_context_status, 0},
        {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+       {"i915_swizzle_info", i915_swizzle_info, 0},
+       {"i915_ppgtt_info", i915_ppgtt_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
@@ -1805,17 +1842,25 @@ int i915_debugfs_init(struct drm_minor *minor)
 {
        int ret;
 
-       ret = i915_wedged_create(minor->debugfs_root, minor);
+       ret = i915_debugfs_create(minor->debugfs_root, minor,
+                                 "i915_wedged",
+                                 &i915_wedged_fops);
        if (ret)
                return ret;
 
        ret = i915_forcewake_create(minor->debugfs_root, minor);
        if (ret)
                return ret;
-       ret = i915_max_freq_create(minor->debugfs_root, minor);
+
+       ret = i915_debugfs_create(minor->debugfs_root, minor,
+                                 "i915_max_freq",
+                                 &i915_max_freq_fops);
        if (ret)
                return ret;
-       ret = i915_cache_sharing_create(minor->debugfs_root, minor);
+
+       ret = i915_debugfs_create(minor->debugfs_root, minor,
+                                 "i915_cache_sharing",
+                                 &i915_cache_sharing_fops);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ddfe3d902b2a3a5d908b0c7664a4348f94ce3988..38dfcf91f400332af1ea9641699d375d9384c33f 100644
@@ -784,6 +784,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
+       case I915_PARAM_HAS_LLC:
+               value = HAS_LLC(dev);
+               break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@ -1193,22 +1196,39 @@ static int i915_load_gem_init(struct drm_device *dev)
        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
 
-       /* Let GEM Manage all of the aperture.
-        *
-        * However, leave one page at the end still bound to the scratch page.
-        * There are a number of places where the hardware apparently
-        * prefetches past the end of the object, and we've seen multiple
-        * hangs with the GPU head pointer stuck in a batchbuffer bound
-        * at the last page of the aperture.  One page should be enough to
-        * keep any prefetching inside of the aperture.
-        */
-       i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
+       if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) {
+               /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+                * aperture accordingly when using aliasing ppgtt. */
+               gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+               /* For paranoia keep the guard page in between. */
+               gtt_size -= PAGE_SIZE;
+
+               i915_gem_do_init(dev, 0, mappable_size, gtt_size);
+
+               ret = i915_gem_init_aliasing_ppgtt(dev);
+               if (ret)
+                       return ret;
+       } else {
+               /* Let GEM Manage all of the aperture.
+                *
+                * However, leave one page at the end still bound to the scratch
+                * page.  There are a number of places where the hardware
+                * apparently prefetches past the end of the object, and we've
+                * seen multiple hangs with the GPU head pointer stuck in a
+                * batchbuffer bound at the last page of the aperture.  One page
+                * should be enough to keep any prefetching inside of the
+                * aperture.
+                */
+               i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
+       }
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_init_ringbuffer(dev);
+       ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
-       if (ret)
+       if (ret) {
+               i915_gem_cleanup_aliasing_ppgtt(dev);
                return ret;
+       }
 
        /* Try to set up FBC with a reasonable compressed buffer size */
        if (I915_HAS_FBC(dev) && i915_powersave) {
@@ -1295,6 +1315,7 @@ cleanup_gem:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        mutex_unlock(&dev->struct_mutex);
+       i915_gem_cleanup_aliasing_ppgtt(dev);
 cleanup_vga_switcheroo:
        vga_switcheroo_unregister_client(dev->pdev);
 cleanup_vga_client:
@@ -2129,7 +2150,7 @@ int i915_driver_unload(struct drm_device *dev)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gpu_idle(dev);
+       ret = i915_gpu_idle(dev, true);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
        mutex_unlock(&dev->struct_mutex);
@@ -2182,6 +2203,7 @@ int i915_driver_unload(struct drm_device *dev)
                i915_gem_free_all_phys_object(dev);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
+               i915_gem_cleanup_aliasing_ppgtt(dev);
                if (I915_HAS_FBC(dev) && i915_powersave)
                        i915_cleanup_compression(dev);
                drm_mm_takedown(&dev_priv->mm.stolen);
@@ -2247,18 +2269,12 @@ void i915_driver_lastclose(struct drm_device * dev)
 
        i915_gem_lastclose(dev);
 
-       if (dev_priv->agp_heap)
-               i915_mem_takedown(&(dev_priv->agp_heap));
-
        i915_dma_cleanup(dev);
 }
 
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        i915_gem_release(dev, file_priv);
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -2277,11 +2293,11 @@ struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 308f819135626c6b9c8d3805c953bd7a9ddf55dc..189041984aba1b3db287778f881c905703a3358f 100644
@@ -103,6 +103,11 @@ MODULE_PARM_DESC(enable_hangcheck,
                "WARNING: Disabling this can cause system wide hangs. "
                "(default: true)");
 
+bool i915_enable_ppgtt __read_mostly = 1;
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600);
+MODULE_PARM_DESC(i915_enable_ppgtt,
+               "Enable PPGTT (default: true)");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
@@ -198,7 +203,7 @@ static const struct intel_device_info intel_pineview_info = {
 
 static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5,
-       .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
 };
 
@@ -214,6 +219,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
+       .has_llc = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
@@ -222,6 +228,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
        .has_fbc = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
+       .has_llc = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_d_info = {
@@ -229,6 +236,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
+       .has_llc = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
@@ -237,6 +245,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
        .has_fbc = 0,   /* FBC is not enabled on Ivybridge mobile yet */
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
+       .has_llc = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
@@ -494,7 +503,7 @@ static int i915_drm_thaw(struct drm_device *dev)
                mutex_lock(&dev->struct_mutex);
                dev_priv->mm.suspended = 0;
 
-               error = i915_gem_init_ringbuffer(dev);
+               error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);
 
                if (HAS_PCH_SPLIT(dev))
@@ -633,7 +642,7 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
 }
 
 /**
- * i965_reset - reset chip after a hang
+ * i915_reset - reset chip after a hang
  * @dev: drm device to reset
  * @flags: reset domains
  *
@@ -709,12 +718,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
                        !dev_priv->mm.suspended) {
                dev_priv->mm.suspended = 0;
 
+               i915_gem_init_swizzling(dev);
+
                dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
                if (HAS_BSD(dev))
                    dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
                if (HAS_BLT(dev))
                    dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
 
+               i915_gem_init_ppgtt(dev);
+
                mutex_unlock(&dev->struct_mutex);
                drm_irq_uninstall(dev);
                drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9689ca38b2b333f26c75e95421e773b3331bd81c..922aed33035d01143940227df2a5e77fdc49a5be 100644
@@ -135,6 +135,7 @@ struct drm_i915_fence_reg {
        struct list_head lru_list;
        struct drm_i915_gem_object *obj;
        uint32_t setup_seqno;
+       int pin_count;
 };
 
 struct sdvo_device_mapping {
@@ -152,26 +153,25 @@ struct drm_i915_error_state {
        u32 eir;
        u32 pgtbl_er;
        u32 pipestat[I915_MAX_PIPES];
-       u32 ipeir;
-       u32 ipehr;
-       u32 instdone;
-       u32 acthd;
+       u32 tail[I915_NUM_RINGS];
+       u32 head[I915_NUM_RINGS];
+       u32 ipeir[I915_NUM_RINGS];
+       u32 ipehr[I915_NUM_RINGS];
+       u32 instdone[I915_NUM_RINGS];
+       u32 acthd[I915_NUM_RINGS];
+       u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+       /* our own tracking of ring head and tail */
+       u32 cpu_ring_head[I915_NUM_RINGS];
+       u32 cpu_ring_tail[I915_NUM_RINGS];
        u32 error; /* gen6+ */
-       u32 bcs_acthd; /* gen6+ blt engine */
-       u32 bcs_ipehr;
-       u32 bcs_ipeir;
-       u32 bcs_instdone;
-       u32 bcs_seqno;
-       u32 vcs_acthd; /* gen6+ bsd engine */
-       u32 vcs_ipehr;
-       u32 vcs_ipeir;
-       u32 vcs_instdone;
-       u32 vcs_seqno;
-       u32 instpm;
-       u32 instps;
+       u32 instpm[I915_NUM_RINGS];
+       u32 instps[I915_NUM_RINGS];
        u32 instdone1;
-       u32 seqno;
+       u32 seqno[I915_NUM_RINGS];
        u64 bbaddr;
+       u32 fault_reg[I915_NUM_RINGS];
+       u32 done_reg;
+       u32 faddr[I915_NUM_RINGS];
        u64 fence[I915_MAX_NUM_FENCES];
        struct timeval time;
        struct drm_i915_error_object {
@@ -255,6 +255,17 @@ struct intel_device_info {
        u8 supports_tv:1;
        u8 has_bsd_ring:1;
        u8 has_blt_ring:1;
+       u8 has_llc:1;
+};
+
+#define I915_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES 1024
+struct i915_hw_ppgtt {
+       unsigned num_pd_entries;
+       struct page **pt_pages;
+       uint32_t pd_offset;
+       dma_addr_t *pt_dma_addr;
+       dma_addr_t scratch_page_dma_addr;
 };
 
 enum no_fbc_reason {
@@ -335,7 +346,6 @@ typedef struct drm_i915_private {
 
        int tex_lru_log_granularity;
        int allow_batchbuffer;
-       struct mem_block *agp_heap;
        unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
        int vblank_pipe;
        int num_pipe;
@@ -584,6 +594,9 @@ typedef struct drm_i915_private {
                struct io_mapping *gtt_mapping;
                int gtt_mtrr;
 
+               /** PPGTT used for aliasing the PPGTT with the GTT */
+               struct i915_hw_ppgtt *aliasing_ppgtt;
+
                struct shrinker inactive_shrinker;
 
                /**
@@ -841,6 +854,8 @@ struct drm_i915_gem_object {
 
        unsigned int cache_level:2;
 
+       unsigned int has_aliasing_ppgtt_mapping:1;
+
        struct page **pages;
 
        /**
@@ -974,8 +989,11 @@ struct drm_i915_file_private {
 
 #define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
 #define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
+#define HAS_ALIASING_PPGTT(dev)        (INTEL_INFO(dev)->gen >= 6)
+
 #define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)    (INTEL_INFO(dev)->overlay_needs_physical)
 
@@ -1018,6 +1036,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly;
 extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
+extern bool i915_enable_ppgtt __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1079,18 +1098,6 @@ extern void i915_destroy_error_state(struct drm_device *dev);
 #endif
 
 
-/* i915_mem.c */
-extern int i915_mem_alloc(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv);
-extern int i915_mem_free(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv);
-extern int i915_mem_init_heap(struct drm_device *dev, void *data,
-                             struct drm_file *file_priv);
-extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
-                                struct drm_file *file_priv);
-extern void i915_mem_takedown(struct mem_block **heap);
-extern void i915_mem_release(struct drm_device * dev,
-                            struct drm_file *file_priv, struct mem_block *heap);
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
@@ -1181,6 +1188,24 @@ int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                                           struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
+static inline void
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+       if (obj->fence_reg != I915_FENCE_REG_NONE) {
+               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+               dev_priv->fence_regs[obj->fence_reg].pin_count++;
+       }
+}
+
+static inline void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+       if (obj->fence_reg != I915_FENCE_REG_NONE) {
+               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+               dev_priv->fence_regs[obj->fence_reg].pin_count--;
+       }
+}
+
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1188,19 +1213,22 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
                                            uint32_t read_domains,
                                            uint32_t write_domain);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
+int __must_check i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
                      unsigned long start,
                      unsigned long mappable_end,
                      unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
                                  struct drm_file *file,
                                  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-                                  uint32_t seqno);
+                                  uint32_t seqno,
+                                  bool do_retire);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1227,6 +1255,14 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level);
 
 /* i915_gem_gtt.c */
+int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+                           struct drm_i915_gem_object *obj,
+                           enum i915_cache_level cache_level);
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+                             struct drm_i915_gem_object *obj);
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
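
The new fence pin_count appears intended to let callers mark a fence
register as in use, so the fence-stealing code skips it while the GPU
still depends on it. A hedged usage sketch of the inline helpers above
(the actual commands are elided):

        ret = i915_gem_object_get_fence(obj, pipelined);
        if (ret == 0) {
                i915_gem_object_pin_fence(obj);
                /* ... emit commands that rely on the fenced, tiled view ... */
                i915_gem_object_unpin_fence(obj);
        }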
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e55badb2d86ddc5d7f245b8e81343a358f5dc43c..f1193b194331dc16bbfbfa10830f62c6e357224b 100644
@@ -58,6 +58,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
                                    struct shrink_control *sc);
+static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -258,73 +259,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
                obj->tiling_mode != I915_TILING_NONE;
 }
 
-static inline void
-slow_shmem_copy(struct page *dst_page,
-               int dst_offset,
-               struct page *src_page,
-               int src_offset,
-               int length)
-{
-       char *dst_vaddr, *src_vaddr;
-
-       dst_vaddr = kmap(dst_page);
-       src_vaddr = kmap(src_page);
-
-       memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
-
-       kunmap(src_page);
-       kunmap(dst_page);
-}
-
-static inline void
-slow_shmem_bit17_copy(struct page *gpu_page,
-                     int gpu_offset,
-                     struct page *cpu_page,
-                     int cpu_offset,
-                     int length,
-                     int is_read)
-{
-       char *gpu_vaddr, *cpu_vaddr;
-
-       /* Use the unswizzled path if this page isn't affected. */
-       if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
-               if (is_read)
-                       return slow_shmem_copy(cpu_page, cpu_offset,
-                                              gpu_page, gpu_offset, length);
-               else
-                       return slow_shmem_copy(gpu_page, gpu_offset,
-                                              cpu_page, cpu_offset, length);
-       }
-
-       gpu_vaddr = kmap(gpu_page);
-       cpu_vaddr = kmap(cpu_page);
-
-       /* Copy the data, XORing A6 with A17 (1). The user already knows he's
-        * XORing with the other bits (A9 for Y, A9 and A10 for X)
-        */
-       while (length > 0) {
-               int cacheline_end = ALIGN(gpu_offset + 1, 64);
-               int this_length = min(cacheline_end - gpu_offset, length);
-               int swizzled_gpu_offset = gpu_offset ^ 64;
-
-               if (is_read) {
-                       memcpy(cpu_vaddr + cpu_offset,
-                              gpu_vaddr + swizzled_gpu_offset,
-                              this_length);
-               } else {
-                       memcpy(gpu_vaddr + swizzled_gpu_offset,
-                              cpu_vaddr + cpu_offset,
-                              this_length);
-               }
-               cpu_offset += this_length;
-               gpu_offset += this_length;
-               length -= this_length;
-       }
-
-       kunmap(cpu_page);
-       kunmap(gpu_page);
-}
-
 /**
  * This is the fast shmem pread path, which attempts to copy_from_user directly
  * from the backing pages of the object to the user's address space.  On a
@@ -385,6 +319,58 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
        return 0;
 }
 
+static inline int
+__copy_to_user_swizzled(char __user *cpu_vaddr,
+                       const char *gpu_vaddr, int gpu_offset,
+                       int length)
+{
+       int ret, cpu_offset = 0;
+
+       while (length > 0) {
+               int cacheline_end = ALIGN(gpu_offset + 1, 64);
+               int this_length = min(cacheline_end - gpu_offset, length);
+               int swizzled_gpu_offset = gpu_offset ^ 64;
+
+               ret = __copy_to_user(cpu_vaddr + cpu_offset,
+                                    gpu_vaddr + swizzled_gpu_offset,
+                                    this_length);
+               if (ret)
+                       return ret + length;
+
+               cpu_offset += this_length;
+               gpu_offset += this_length;
+               length -= this_length;
+       }
+
+       return 0;
+}
+
+static inline int
+__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
+                         const char *cpu_vaddr,
+                         int length)
+{
+       int ret, cpu_offset = 0;
+
+       while (length > 0) {
+               int cacheline_end = ALIGN(gpu_offset + 1, 64);
+               int this_length = min(cacheline_end - gpu_offset, length);
+               int swizzled_gpu_offset = gpu_offset ^ 64;
+
+               ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
+                                      cpu_vaddr + cpu_offset,
+                                      this_length);
+               if (ret)
+                       return ret + length;
+
+               cpu_offset += this_length;
+               gpu_offset += this_length;
+               length -= this_length;
+       }
+
+       return 0;
+}
+
 /**
  * This is the fallback shmem pread path, which allocates temporary storage
  * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -398,72 +384,34 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
                          struct drm_file *file)
 {
        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-       struct mm_struct *mm = current->mm;
-       struct page **user_pages;
+       char __user *user_data;
        ssize_t remain;
-       loff_t offset, pinned_pages, i;
-       loff_t first_data_page, last_data_page, num_pages;
-       int shmem_page_offset;
-       int data_page_index, data_page_offset;
-       int page_length;
-       int ret;
-       uint64_t data_ptr = args->data_ptr;
-       int do_bit17_swizzling;
+       loff_t offset;
+       int shmem_page_offset, page_length, ret;
+       int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       /* Pin the user pages containing the data.  We can't fault while
-        * holding the struct mutex, yet we want to hold it while
-        * dereferencing the user data.
-        */
-       first_data_page = data_ptr / PAGE_SIZE;
-       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
-       num_pages = last_data_page - first_data_page + 1;
+       obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
-       if (user_pages == NULL)
-               return -ENOMEM;
+       offset = args->offset;
 
        mutex_unlock(&dev->struct_mutex);
-       down_read(&mm->mmap_sem);
-       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-                                     num_pages, 1, 0, user_pages, NULL);
-       up_read(&mm->mmap_sem);
-       mutex_lock(&dev->struct_mutex);
-       if (pinned_pages < num_pages) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       ret = i915_gem_object_set_cpu_read_domain_range(obj,
-                                                       args->offset,
-                                                       args->size);
-       if (ret)
-               goto out;
-
-       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
-       offset = args->offset;
 
        while (remain > 0) {
                struct page *page;
+               char *vaddr;
 
                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
-                * data_page_index = page number in get_user_pages return
-                * data_page_offset = offset with data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);
-               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-               data_page_offset = offset_in_page(data_ptr);
-
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
-               if ((data_page_offset + page_length) > PAGE_SIZE)
-                       page_length = PAGE_SIZE - data_page_offset;
 
                page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page)) {
@@ -471,36 +419,38 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
                        goto out;
                }
 
-               if (do_bit17_swizzling) {
-                       slow_shmem_bit17_copy(page,
-                                             shmem_page_offset,
-                                             user_pages[data_page_index],
-                                             data_page_offset,
-                                             page_length,
-                                             1);
-               } else {
-                       slow_shmem_copy(user_pages[data_page_index],
-                                       data_page_offset,
-                                       page,
-                                       shmem_page_offset,
-                                       page_length);
-               }
+               page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+                       (page_to_phys(page) & (1 << 17)) != 0;
+
+               vaddr = kmap(page);
+               if (page_do_bit17_swizzling)
+                       ret = __copy_to_user_swizzled(user_data,
+                                                     vaddr, shmem_page_offset,
+                                                     page_length);
+               else
+                       ret = __copy_to_user(user_data,
+                                            vaddr + shmem_page_offset,
+                                            page_length);
+               kunmap(page);
 
                mark_page_accessed(page);
                page_cache_release(page);
 
+               if (ret) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+
                remain -= page_length;
-               data_ptr += page_length;
+               user_data += page_length;
                offset += page_length;
        }
 
 out:
-       for (i = 0; i < pinned_pages; i++) {
-               SetPageDirty(user_pages[i]);
-               mark_page_accessed(user_pages[i]);
-               page_cache_release(user_pages[i]);
-       }
-       drm_free_large(user_pages);
+       mutex_lock(&dev->struct_mutex);
+       /* Fixup: Kill any reinstated backing storage pages */
+       if (obj->madv == __I915_MADV_PURGED)
+               i915_gem_object_truncate(obj);
 
        return ret;
 }
@@ -841,71 +791,36 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
                           struct drm_file *file)
 {
        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-       struct mm_struct *mm = current->mm;
-       struct page **user_pages;
        ssize_t remain;
-       loff_t offset, pinned_pages, i;
-       loff_t first_data_page, last_data_page, num_pages;
-       int shmem_page_offset;
-       int data_page_index,  data_page_offset;
-       int page_length;
-       int ret;
-       uint64_t data_ptr = args->data_ptr;
-       int do_bit17_swizzling;
+       loff_t offset;
+       char __user *user_data;
+       int shmem_page_offset, page_length, ret;
+       int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       /* Pin the user pages containing the data.  We can't fault while
-        * holding the struct mutex, and all of the pwrite implementations
-        * want to hold it while dereferencing the user data.
-        */
-       first_data_page = data_ptr / PAGE_SIZE;
-       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
-       num_pages = last_data_page - first_data_page + 1;
-
-       user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
-       if (user_pages == NULL)
-               return -ENOMEM;
-
-       mutex_unlock(&dev->struct_mutex);
-       down_read(&mm->mmap_sem);
-       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-                                     num_pages, 0, 0, user_pages, NULL);
-       up_read(&mm->mmap_sem);
-       mutex_lock(&dev->struct_mutex);
-       if (pinned_pages < num_pages) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-       if (ret)
-               goto out;
-
-       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+       obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
        offset = args->offset;
        obj->dirty = 1;
 
+       mutex_unlock(&dev->struct_mutex);
+
        while (remain > 0) {
                struct page *page;
+               char *vaddr;
 
                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
-                * data_page_index = page number in get_user_pages return
-                * data_page_offset = offset with data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);
-               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-               data_page_offset = offset_in_page(data_ptr);
 
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
-               if ((data_page_offset + page_length) > PAGE_SIZE)
-                       page_length = PAGE_SIZE - data_page_offset;
 
                page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page)) {
@@ -913,34 +828,45 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
                        goto out;
                }
 
-               if (do_bit17_swizzling) {
-                       slow_shmem_bit17_copy(page,
-                                             shmem_page_offset,
-                                             user_pages[data_page_index],
-                                             data_page_offset,
-                                             page_length,
-                                             0);
-               } else {
-                       slow_shmem_copy(page,
-                                       shmem_page_offset,
-                                       user_pages[data_page_index],
-                                       data_page_offset,
-                                       page_length);
-               }
+               page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+                       (page_to_phys(page) & (1 << 17)) != 0;
+
+               vaddr = kmap(page);
+               if (page_do_bit17_swizzling)
+                       ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
+                                                       user_data,
+                                                       page_length);
+               else
+                       ret = __copy_from_user(vaddr + shmem_page_offset,
+                                              user_data,
+                                              page_length);
+               kunmap(page);
 
                set_page_dirty(page);
                mark_page_accessed(page);
                page_cache_release(page);
 
+               if (ret) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+
                remain -= page_length;
-               data_ptr += page_length;
+               user_data += page_length;
                offset += page_length;
        }
 
 out:
-       for (i = 0; i < pinned_pages; i++)
-               page_cache_release(user_pages[i]);
-       drm_free_large(user_pages);
+       mutex_lock(&dev->struct_mutex);
+       /* Fixup: Kill any reinstated backing storage pages */
+       if (obj->madv == __I915_MADV_PURGED)
+               i915_gem_object_truncate(obj);
+       /* and flush dirty cachelines in case the object isn't in the cpu write
+        * domain anymore. */
+       if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+               i915_gem_clflush_object(obj);
+               intel_gtt_chipset_flush();
+       }
 
        return ret;
 }
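
The per-page loop above leans on __copy_from_user_swizzled(), which is added
elsewhere in this series. A plausible reconstruction, assuming the usual
cacheline-granular swizzle on these chipsets (pages whose physical address has
bit 17 set get bit 6 of the offset flipped, i.e. offset ^ 64 per 64-byte
cacheline); the in-tree helper may differ in detail:

	static inline int
	__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
				  const char __user *cpu_vaddr, int length)
	{
		int ret, cpu_offset = 0;

		while (length > 0) {
			/* never cross a 64-byte cacheline in one chunk */
			int cacheline_end = ALIGN(gpu_offset + 1, 64);
			int this_length = min(cacheline_end - gpu_offset, length);
			/* undo the swizzle by flipping bit 6 */
			int swizzled_gpu_offset = gpu_offset ^ 64;

			ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
					       cpu_vaddr + cpu_offset,
					       this_length);
			if (ret)
				return ret; /* caller only checks for failure */

			cpu_offset += this_length;
			gpu_offset += this_length;
			length -= this_length;
		}
		return 0;
	}
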
@@ -996,10 +922,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * pread/pwrite currently read and write from the CPU
         * perspective, requiring manual detiling by the client.
         */
-       if (obj->phys_obj)
+       if (obj->phys_obj) {
                ret = i915_gem_phys_pwrite(dev, obj, args, file);
-       else if (obj->gtt_space &&
-                obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+               goto out;
+       }
+
+       if (obj->gtt_space &&
+           obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_object_pin(obj, 0, true);
                if (ret)
                        goto out;
@@ -1018,18 +947,24 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
 out_unpin:
                i915_gem_object_unpin(obj);
-       } else {
-               ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-               if (ret)
-                       goto out;
 
-               ret = -EFAULT;
-               if (!i915_gem_object_needs_bit17_swizzle(obj))
-                       ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
-               if (ret == -EFAULT)
-                       ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+               if (ret != -EFAULT)
+                       goto out;
+               /* Fall through to the shmfs paths because the gtt paths might
+                * fail with non-page-backed user pointers (e.g. gtt mappings
+                * when moving data between textures). */
        }
 
+       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+       if (ret)
+               goto out;
+
+       ret = -EFAULT;
+       if (!i915_gem_object_needs_bit17_swizzle(obj))
+               ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+       if (ret == -EFAULT)
+               ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+
 out:
        drm_gem_object_unreference(&obj->base);
 unlock:
@@ -1141,7 +1076,6 @@ int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned long addr;
@@ -1153,11 +1087,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        if (obj == NULL)
                return -ENOENT;
 
-       if (obj->size > dev_priv->mm.gtt_mappable_end) {
-               drm_gem_object_unreference_unlocked(obj);
-               return -E2BIG;
-       }
-
        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1943,7 +1872,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-                 uint32_t seqno)
+                 uint32_t seqno,
+                 bool do_retire)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 ier;
@@ -2027,7 +1957,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
         * buffer to have made it to the inactive list, and we would need
         * a separate wait queue to handle that.
         */
-       if (ret == 0)
+       if (ret == 0 && do_retire)
                i915_gem_retire_requests_ring(ring);
 
        return ret;
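
The new do_retire flag lets callers opt out of retiring completed requests
after the wait. The likely motivation is the vt-d idling workaround in the GTT
code further down (do_idling() passes false), which runs from the unbind path,
where re-entering request retirement would recurse. Both call shapes, as they
appear in this patch:

	/* ordinary wait: retire whatever completed */
	ret = i915_wait_request(obj->ring, obj->last_rendering_seqno, true);

	/* vt-d idling workaround: skip retirement */
	if (i915_gpu_idle(dev_priv->dev, false))
		DRM_ERROR("Couldn't idle GPU\n");
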
@@ -2051,7 +1981,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
         * it.
         */
        if (obj->active) {
-               ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+               ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
+                                       true);
                if (ret)
                        return ret;
        }
@@ -2089,6 +2020,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
+       drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
        int ret = 0;
 
        if (obj->gtt_space == NULL)
@@ -2133,6 +2065,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        trace_i915_gem_object_unbind(obj);
 
        i915_gem_gtt_unbind_object(obj);
+       if (obj->has_aliasing_ppgtt_mapping) {
+               i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
+               obj->has_aliasing_ppgtt_mapping = 0;
+       }
+
        i915_gem_object_put_pages_gtt(obj);
 
        list_del_init(&obj->gtt_list);
@@ -2172,7 +2109,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
 {
        int ret;
 
@@ -2186,18 +2123,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
                        return ret;
        }
 
-       return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+       return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
+                                do_retire);
 }
 
-int
-i915_gpu_idle(struct drm_device *dev)
+int i915_gpu_idle(struct drm_device *dev, bool do_retire)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i;
 
        /* Flush everything onto the inactive list. */
        for (i = 0; i < I915_NUM_RINGS; i++) {
-               ret = i915_ring_idle(&dev_priv->ring[i]);
+               ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
                if (ret)
                        return ret;
        }
@@ -2400,7 +2337,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
                if (!ring_passed_seqno(obj->last_fenced_ring,
                                       obj->last_fenced_seqno)) {
                        ret = i915_wait_request(obj->last_fenced_ring,
-                                               obj->last_fenced_seqno);
+                                               obj->last_fenced_seqno,
+                                               true);
                        if (ret)
                                return ret;
                }
@@ -2432,6 +2370,8 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+               WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
                i915_gem_clear_fence_reg(obj->base.dev,
                                         &dev_priv->fence_regs[obj->fence_reg]);
 
@@ -2456,7 +2396,7 @@ i915_find_fence_reg(struct drm_device *dev,
                if (!reg->obj)
                        return reg;
 
-               if (!reg->obj->pin_count)
+               if (!reg->pin_count)
                        avail = reg;
        }
 
@@ -2466,7 +2406,7 @@ i915_find_fence_reg(struct drm_device *dev,
        /* None available, try to steal one or wait for a user to finish */
        avail = first = NULL;
        list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
-               if (reg->obj->pin_count)
+               if (reg->pin_count)
                        continue;
 
                if (first == NULL)
@@ -2541,7 +2481,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                                if (!ring_passed_seqno(obj->last_fenced_ring,
                                                       reg->setup_seqno)) {
                                        ret = i915_wait_request(obj->last_fenced_ring,
-                                                               reg->setup_seqno);
+                                                               reg->setup_seqno,
+                                                               true);
                                        if (ret)
                                                return ret;
                                }
@@ -2560,7 +2501,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 
        reg = i915_find_fence_reg(dev, pipelined);
        if (reg == NULL)
-               return -ENOSPC;
+               return -EDEADLK;
 
        ret = i915_gem_object_flush_fence(obj, pipelined);
        if (ret)
@@ -2660,6 +2601,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
        list_del_init(&reg->lru_list);
        reg->obj = NULL;
        reg->setup_seqno = 0;
+       reg->pin_count = 0;
 }
 
 /**
@@ -2946,6 +2888,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
 {
+       struct drm_device *dev = obj->base.dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
        if (obj->cache_level == cache_level)
@@ -2974,6 +2918,9 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                }
 
                i915_gem_gtt_rebind_object(obj, cache_level);
+               if (obj->has_aliasing_ppgtt_mapping)
+                       i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+                                              obj, cache_level);
        }
 
        if (cache_level == I915_CACHE_NONE) {
@@ -3619,8 +3566,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-       if (IS_GEN6(dev) || IS_GEN7(dev)) {
-               /* On Gen6, we can have the GPU use the LLC (the CPU
+       if (HAS_LLC(dev)) {
+               /* On some devices, we can have the GPU use the LLC (the CPU
                 * cache) for about a 10% performance improvement
                 * compared to uncached.  Graphics requests other than
                 * display scanout are coherent with the CPU in
@@ -3710,7 +3657,7 @@ i915_gem_idle(struct drm_device *dev)
                return 0;
        }
 
-       ret = i915_gpu_idle(dev);
+       ret = i915_gpu_idle(dev, true);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
@@ -3745,12 +3692,71 @@ i915_gem_idle(struct drm_device *dev)
        return 0;
 }
 
+void i915_gem_init_swizzling(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       if (INTEL_INFO(dev)->gen < 5 ||
+           dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+               return;
+
+       I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+                                DISP_TILE_SURFACE_SWIZZLING);
+
+       if (IS_GEN5(dev))
+               return;
+
+       I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
+       if (IS_GEN6(dev))
+               I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
+       else
+               I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
+}
+
+void i915_gem_init_ppgtt(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       uint32_t pd_offset;
+       struct intel_ring_buffer *ring;
+       int i;
+
+       if (!dev_priv->mm.aliasing_ppgtt)
+               return;
+
+       pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
+       pd_offset /= 64; /* in cachelines */
+       pd_offset <<= 16;
+
+       if (INTEL_INFO(dev)->gen == 6) {
+               uint32_t ecochk = I915_READ(GAM_ECOCHK);
+               I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+                                      ECOCHK_PPGTT_CACHE64B);
+               I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+       } else if (INTEL_INFO(dev)->gen >= 7) {
+               I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+               /* GFX_MODE is per-ring on gen7+ */
+       }
+
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               ring = &dev_priv->ring[i];
+
+               if (INTEL_INFO(dev)->gen >= 7)
+                       I915_WRITE(RING_MODE_GEN7(ring),
+                                  GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+
+               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+               I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+       }
+}
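
PP_DIR_BASE takes the page-directory offset measured in 64-byte cachelines,
packed into the upper half of the register. A worked example, assuming
I915_PPGTT_PD_ENTRIES == 512 (consistent with the PP_DIR_DCLV_2G value
programmed above):

	/* first_pd_entry_in_global_pt = 512*1024 - 512      = 523776
	 * pd_offset in bytes          = 523776 * 4          = 0x1ff800
	 * pd_offset / 64 (cachelines)                       = 0x7fe0
	 * value written to RING_PP_DIR_BASE = 0x7fe0 << 16  = 0x7fe00000
	 */
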
+
 int
-i915_gem_init_ringbuffer(struct drm_device *dev)
+i915_gem_init_hw(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
+       i915_gem_init_swizzling(dev);
+
        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;
@@ -3769,6 +3775,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 
        dev_priv->next_seqno = 1;
 
+       i915_gem_init_ppgtt(dev);
+
        return 0;
 
 cleanup_bsd_ring:
@@ -3806,7 +3814,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        mutex_lock(&dev->struct_mutex);
        dev_priv->mm.suspended = 0;
 
-       ret = i915_gem_init_ringbuffer(dev);
+       ret = i915_gem_init_hw(dev);
        if (ret != 0) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
@@ -4201,7 +4209,7 @@ rescan:
                 * This dramatically reduces the number of OOM-killer
                 * events whilst running the GPU aggressively.
                 */
-               if (i915_gpu_idle(dev) == 0)
+               if (i915_gpu_idle(dev, true) == 0)
                        goto rescan;
        }
        mutex_unlock(&dev->struct_mutex);
index ead5d00f91b043618f90457f10841b2a8505dbcc..097119caa36a55bfe28868016c364389695c796a 100644 (file)
@@ -195,7 +195,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
        trace_i915_gem_evict_everything(dev, purgeable_only);
 
        /* Flush everything (on to the inactive lists) and evict */
-       ret = i915_gpu_idle(dev);
+       ret = i915_gpu_idle(dev, true);
        if (ret)
                return ret;
 
index 65e1f0043f9df5564d63eb8e95d413f7b5f01513..81687af00893ca5c20edfe2256dd5b98da7fdf45 100644 (file)
@@ -203,9 +203,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
        cd->invalidate_domains |= invalidate_domains;
        cd->flush_domains |= flush_domains;
        if (flush_domains & I915_GEM_GPU_DOMAINS)
-               cd->flush_rings |= obj->ring->id;
+               cd->flush_rings |= intel_ring_flag(obj->ring);
        if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-               cd->flush_rings |= ring->id;
+               cd->flush_rings |= intel_ring_flag(ring);
 }
 
 struct eb_objects {
@@ -287,14 +287,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
         * exec_object list, so it should have a GTT space bound by now.
         */
        if (unlikely(target_offset == 0)) {
-               DRM_ERROR("No GTT space found for object %d\n",
+               DRM_DEBUG("No GTT space found for object %d\n",
                          reloc->target_handle);
                return ret;
        }
 
        /* Validate that the target is in a valid r/w GPU domain */
        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
-               DRM_ERROR("reloc with multiple write domains: "
+               DRM_DEBUG("reloc with multiple write domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
@@ -303,8 +303,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                          reloc->write_domain);
                return ret;
        }
-       if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
-               DRM_ERROR("reloc with read/write CPU domains: "
+       if (unlikely((reloc->write_domain | reloc->read_domains)
+                    & ~I915_GEM_GPU_DOMAINS)) {
+               DRM_DEBUG("reloc with read/write non-GPU domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
@@ -315,7 +316,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
        }
        if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
                     reloc->write_domain != target_obj->pending_write_domain)) {
-               DRM_ERROR("Write domain conflict: "
+               DRM_DEBUG("Write domain conflict: "
                          "obj %p target %d offset %d "
                          "new %08x old %08x\n",
                          obj, reloc->target_handle,
@@ -336,7 +337,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 
        /* Check that the relocation address is valid... */
        if (unlikely(reloc->offset > obj->base.size - 4)) {
-               DRM_ERROR("Relocation beyond object bounds: "
+               DRM_DEBUG("Relocation beyond object bounds: "
                          "obj %p target %d offset %d size %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
@@ -344,7 +345,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                return ret;
        }
        if (unlikely(reloc->offset & 3)) {
-               DRM_ERROR("Relocation not 4-byte aligned: "
+               DRM_DEBUG("Relocation not 4-byte aligned: "
                          "obj %p target %d offset %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset);
@@ -461,11 +462,60 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
        return ret;
 }
 
+#define  __EXEC_OBJECT_HAS_FENCE (1<<31)
+
+static int
+pin_and_fence_object(struct drm_i915_gem_object *obj,
+                    struct intel_ring_buffer *ring)
+{
+       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+       bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+       bool need_fence, need_mappable;
+       int ret;
+
+       need_fence =
+               has_fenced_gpu_access &&
+               entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+               obj->tiling_mode != I915_TILING_NONE;
+       need_mappable =
+               entry->relocation_count ? true : need_fence;
+
+       ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+       if (ret)
+               return ret;
+
+       if (has_fenced_gpu_access) {
+               if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+                       if (obj->tiling_mode) {
+                               ret = i915_gem_object_get_fence(obj, ring);
+                               if (ret)
+                                       goto err_unpin;
+
+                               entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+                               i915_gem_object_pin_fence(obj);
+                       } else {
+                               ret = i915_gem_object_put_fence(obj);
+                               if (ret)
+                                       goto err_unpin;
+                       }
+               }
+               obj->pending_fenced_gpu_access = need_fence;
+       }
+
+       entry->offset = obj->gtt_offset;
+       return 0;
+
+err_unpin:
+       i915_gem_object_unpin(obj);
+       return ret;
+}
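
__EXEC_OBJECT_HAS_FENCE borrows the top bit of the userspace flags word in the
exec entry to record that this reservation also pinned a fence. Every unwind
site in this patch then releases it with the same idiom:

	entry = obj->exec_entry;
	if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
		i915_gem_object_unpin_fence(obj);
		entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
	}
	i915_gem_object_unpin(obj);
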
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct drm_file *file,
                            struct list_head *objects)
 {
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_object *obj;
        int ret, retry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
@@ -518,6 +568,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                list_for_each_entry(obj, objects, exec_list) {
                        struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
                        bool need_fence, need_mappable;
+
                        if (!obj->gtt_space)
                                continue;
 
@@ -532,58 +583,55 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            (need_mappable && !obj->map_and_fenceable))
                                ret = i915_gem_object_unbind(obj);
                        else
-                               ret = i915_gem_object_pin(obj,
-                                                         entry->alignment,
-                                                         need_mappable);
+                               ret = pin_and_fence_object(obj, ring);
                        if (ret)
                                goto err;
-
-                       entry++;
                }
 
                /* Bind fresh objects */
                list_for_each_entry(obj, objects, exec_list) {
-                       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-                       bool need_fence;
+                       if (obj->gtt_space)
+                               continue;
 
-                       need_fence =
-                               has_fenced_gpu_access &&
-                               entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-                               obj->tiling_mode != I915_TILING_NONE;
+                       ret = pin_and_fence_object(obj, ring);
+                       if (ret) {
+                               int ret_ignore;
+
+                               /* This can potentially raise a harmless
+                                * -EINVAL if we failed to bind in the above
+                                * call. It cannot raise -EINTR since we know
+                                * that the bo is freshly bound and so will
+                                * not need to be flushed or waited upon.
+                                */
+                               ret_ignore = i915_gem_object_unbind(obj);
+                               (void)ret_ignore;
+                               WARN_ON(obj->gtt_space);
+                               break;
+                       }
+               }
 
-                       if (!obj->gtt_space) {
-                               bool need_mappable =
-                                       entry->relocation_count ? true : need_fence;
+               /* Decrement pin count for bound objects */
+               list_for_each_entry(obj, objects, exec_list) {
+                       struct drm_i915_gem_exec_object2 *entry;
 
-                               ret = i915_gem_object_pin(obj,
-                                                         entry->alignment,
-                                                         need_mappable);
-                               if (ret)
-                                       break;
-                       }
+                       if (!obj->gtt_space)
+                               continue;
 
-                       if (has_fenced_gpu_access) {
-                               if (need_fence) {
-                                       ret = i915_gem_object_get_fence(obj, ring);
-                                       if (ret)
-                                               break;
-                               } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-                                          obj->tiling_mode == I915_TILING_NONE) {
-                                       /* XXX pipelined! */
-                                       ret = i915_gem_object_put_fence(obj);
-                                       if (ret)
-                                               break;
-                               }
-                               obj->pending_fenced_gpu_access = need_fence;
+                       entry = obj->exec_entry;
+                       if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+                               i915_gem_object_unpin_fence(obj);
+                               entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
                        }
 
-                       entry->offset = obj->gtt_offset;
-               }
+                       i915_gem_object_unpin(obj);
 
-               /* Decrement pin count for bound objects */
-               list_for_each_entry(obj, objects, exec_list) {
-                       if (obj->gtt_space)
-                               i915_gem_object_unpin(obj);
+                       /* ... and ensure a ppgtt mapping exists if needed. */
+                       if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+                               i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+                                                      obj, obj->cache_level);
+
+                               obj->has_aliasing_ppgtt_mapping = 1;
+                       }
                }
 
                if (ret != -ENOSPC || retry > 1)
@@ -600,16 +648,19 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
        } while (1);
 
 err:
-       obj = list_entry(obj->exec_list.prev,
-                        struct drm_i915_gem_object,
-                        exec_list);
-       while (objects != &obj->exec_list) {
-               if (obj->gtt_space)
-                       i915_gem_object_unpin(obj);
+       list_for_each_entry_continue_reverse(obj, objects, exec_list) {
+               struct drm_i915_gem_exec_object2 *entry;
+
+               if (!obj->gtt_space)
+                       continue;
+
+               entry = obj->exec_entry;
+               if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+                       i915_gem_object_unpin_fence(obj);
+                       entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+               }
 
-               obj = list_entry(obj->exec_list.prev,
-                                struct drm_i915_gem_object,
-                                exec_list);
+               i915_gem_object_unpin(obj);
        }
 
        return ret;
@@ -682,7 +733,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (&obj->base == NULL) {
-                       DRM_ERROR("Invalid object handle %d at index %d\n",
+                       DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        ret = -ENOENT;
                        goto err;
@@ -1013,7 +1064,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        int ret, mode, i;
 
        if (!i915_gem_check_execbuffer(args)) {
-               DRM_ERROR("execbuf with invalid offset/length\n");
+               DRM_DEBUG("execbuf with invalid offset/length\n");
                return -EINVAL;
        }
 
@@ -1028,20 +1079,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                break;
        case I915_EXEC_BSD:
                if (!HAS_BSD(dev)) {
-                       DRM_ERROR("execbuf with invalid ring (BSD)\n");
+                       DRM_DEBUG("execbuf with invalid ring (BSD)\n");
                        return -EINVAL;
                }
                ring = &dev_priv->ring[VCS];
                break;
        case I915_EXEC_BLT:
                if (!HAS_BLT(dev)) {
-                       DRM_ERROR("execbuf with invalid ring (BLT)\n");
+                       DRM_DEBUG("execbuf with invalid ring (BLT)\n");
                        return -EINVAL;
                }
                ring = &dev_priv->ring[BCS];
                break;
        default:
-               DRM_ERROR("execbuf with unknown ring: %d\n",
+               DRM_DEBUG("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }
@@ -1067,18 +1118,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                }
                break;
        default:
-               DRM_ERROR("execbuf with unknown constants: %d\n", mode);
+               DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
                return -EINVAL;
        }
 
        if (args->buffer_count < 1) {
-               DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+               DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }
 
        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->ring[RCS]) {
-                       DRM_ERROR("clip rectangles are only valid with the render ring\n");
+                       DRM_DEBUG("clip rectangles are only valid with the render ring\n");
                        return -EINVAL;
                }
 
@@ -1123,7 +1174,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (&obj->base == NULL) {
-                       DRM_ERROR("Invalid object handle %d at index %d\n",
+                       DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        /* prevent error path from reading uninitialized data */
                        ret = -ENOENT;
@@ -1131,7 +1182,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                }
 
                if (!list_empty(&obj->exec_list)) {
-                       DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
+                       DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                                   obj, exec[i].handle, i);
                        ret = -EINVAL;
                        goto err;
@@ -1169,7 +1220,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        /* Set the pending read domains for the batch buffer to COMMAND */
        if (batch_obj->base.pending_write_domain) {
-               DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+               DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
@@ -1186,7 +1237,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         * so every billion or so execbuffers, we need to stall
                         * the GPU in order to reset the counters.
                         */
-                       ret = i915_gpu_idle(dev);
+                       ret = i915_gpu_idle(dev, true);
                        if (ret)
                                goto err;
 
@@ -1274,7 +1325,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        int ret, i;
 
        if (args->buffer_count < 1) {
-               DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+               DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }
 
@@ -1282,7 +1333,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec_list == NULL || exec2_list == NULL) {
-               DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+               DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
@@ -1293,7 +1344,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
-               DRM_ERROR("copy %d exec entries failed %d\n",
+               DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
@@ -1334,7 +1385,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                                   sizeof(*exec_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
-                       DRM_ERROR("failed to copy %d exec entries "
+                       DRM_DEBUG("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
@@ -1354,7 +1405,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
        int ret;
 
        if (args->buffer_count < 1) {
-               DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+               DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }
 
@@ -1364,7 +1415,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
                                           args->buffer_count);
        if (exec2_list == NULL) {
-               DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+               DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                return -ENOMEM;
        }
@@ -1373,7 +1424,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
-               DRM_ERROR("copy %d exec entries failed %d\n",
+               DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec2_list);
                return -EFAULT;
@@ -1388,7 +1439,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                                   sizeof(*exec2_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
-                       DRM_ERROR("failed to copy %d exec entries "
+                       DRM_DEBUG("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
index 6042c5e6d2785c500e5c1391318aab686c442fa2..2eacd78bb93be76fff278152f2dedeb4793b08df 100644 (file)
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+                                  unsigned first_entry,
+                                  unsigned num_entries)
+{
+       uint32_t *pt_vaddr;
+       uint32_t scratch_pte;
+       unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+       unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+       unsigned last_pte, i;
+
+       scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
+       scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+
+       while (num_entries) {
+               last_pte = first_pte + num_entries;
+               if (last_pte > I915_PPGTT_PT_ENTRIES)
+                       last_pte = I915_PPGTT_PT_ENTRIES;
+
+               pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+               for (i = first_pte; i < last_pte; i++)
+                       pt_vaddr[i] = scratch_pte;
+
+               kunmap_atomic(pt_vaddr);
+
+               num_entries -= last_pte - first_pte;
+               first_pte = 0;
+               act_pd++;
+       }
+}
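
The walk is plain div/mod arithmetic over fixed-size page tables. A worked
example, assuming I915_PPGTT_PT_ENTRIES == 1024 (one 4KiB page of 4-byte
PTEs):

	/* clearing 200 entries starting at entry 3000:
	 *   act_pd    = 3000 / 1024 = 2    (third page table)
	 *   first_pte = 3000 % 1024 = 952
	 * pass 1 clears PTEs 952..1023 of table 2 (72 entries),
	 * pass 2 clears PTEs 0..127 of table 3 (the remaining 128).
	 */
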
+
+int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_hw_ppgtt *ppgtt;
+       uint32_t pd_entry;
+       unsigned first_pd_entry_in_global_pt;
+       uint32_t __iomem *pd_addr;
+       int i;
+       int ret = -ENOMEM;
+
+       /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
+        * entries. For aliasing ppgtt support we just steal them at the end for
+        * now. */
+       first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
+
+       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+       if (!ppgtt)
+               return ret;
+
+       ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+       ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
+                                 GFP_KERNEL);
+       if (!ppgtt->pt_pages)
+               goto err_ppgtt;
+
+       for (i = 0; i < ppgtt->num_pd_entries; i++) {
+               ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
+               if (!ppgtt->pt_pages[i])
+                       goto err_pt_alloc;
+       }
+
+       if (dev_priv->mm.gtt->needs_dmar) {
+               ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
+                                               *ppgtt->num_pd_entries,
+                                            GFP_KERNEL);
+               if (!ppgtt->pt_dma_addr)
+                       goto err_pt_alloc;
+       }
+
+       pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt;
+       for (i = 0; i < ppgtt->num_pd_entries; i++) {
+               dma_addr_t pt_addr;
+               if (dev_priv->mm.gtt->needs_dmar) {
+                       pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
+                                              0, 4096,
+                                              PCI_DMA_BIDIRECTIONAL);
+
+                       if (pci_dma_mapping_error(dev->pdev,
+                                                 pt_addr)) {
+                               ret = -EIO;
+                               goto err_pd_pin;
+
+                       }
+                       ppgtt->pt_dma_addr[i] = pt_addr;
+               } else
+                       pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+               pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+               pd_entry |= GEN6_PDE_VALID;
+
+               writel(pd_entry, pd_addr + i);
+       }
+       readl(pd_addr);
+
+       ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
+
+       i915_ppgtt_clear_range(ppgtt, 0,
+                              ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+
+       ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+
+       dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+       return 0;
+
+err_pd_pin:
+       if (ppgtt->pt_dma_addr) {
+               for (i--; i >= 0; i--)
+                       pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+                                      4096, PCI_DMA_BIDIRECTIONAL);
+       }
+err_pt_alloc:
+       kfree(ppgtt->pt_dma_addr);
+       for (i = 0; i < ppgtt->num_pd_entries; i++) {
+               if (ppgtt->pt_pages[i])
+                       __free_page(ppgtt->pt_pages[i]);
+       }
+       kfree(ppgtt->pt_pages);
+err_ppgtt:
+       kfree(ppgtt);
+
+       return ret;
+}
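
Back-of-the-envelope numbers for the layout this sets up, on the same 512/1024
assumption:

	/* 512 PDEs * 1024 PTEs * 4KiB = 2GiB of aliasing PPGTT space,
	 * matching PP_DIR_DCLV_2G. The 512 stolen GTT entries would
	 * otherwise map 512 * 4KiB = 2MiB, so the aliasing page
	 * directory costs the top 2MiB of the global GTT range.
	 */
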
+
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+       int i;
+
+       if (!ppgtt)
+               return;
+
+       if (ppgtt->pt_dma_addr) {
+               for (i = 0; i < ppgtt->num_pd_entries; i++)
+                       pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+                                      4096, PCI_DMA_BIDIRECTIONAL);
+       }
+
+       kfree(ppgtt->pt_dma_addr);
+       for (i = 0; i < ppgtt->num_pd_entries; i++)
+               __free_page(ppgtt->pt_pages[i]);
+       kfree(ppgtt->pt_pages);
+       kfree(ppgtt);
+}
+
+static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
+                                        struct scatterlist *sg_list,
+                                        unsigned sg_len,
+                                        unsigned first_entry,
+                                        uint32_t pte_flags)
+{
+       uint32_t *pt_vaddr, pte;
+       unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+       unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+       unsigned i, j, m, segment_len;
+       dma_addr_t page_addr;
+       struct scatterlist *sg;
+
+       /* init sg walking */
+       sg = sg_list;
+       i = 0;
+       segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+       m = 0;
+
+       while (i < sg_len) {
+               pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+               for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
+                       page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+                       pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+                       pt_vaddr[j] = pte | pte_flags;
+
+                       /* grab the next page */
+                       m++;
+                       if (m == segment_len) {
+                               sg = sg_next(sg);
+                               i++;
+                               if (i == sg_len)
+                                       break;
+
+                               segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+                               m = 0;
+                       }
+               }
+
+               kunmap_atomic(pt_vaddr);
+
+               first_pte = 0;
+               act_pd++;
+       }
+}
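
The open-coded walk advances an (sg, m) cursor by hand so it can batch one
kmap_atomic() per page table. A more compact but slower equivalent built on
the stock scatterlist iterator, mapping each PTE individually (a sketch under
a hypothetical name, not code from this patch):

	static void i915_ppgtt_insert_sg_entries_simple(struct i915_hw_ppgtt *ppgtt,
							struct scatterlist *sg_list,
							unsigned sg_len,
							unsigned first_entry,
							uint32_t pte_flags)
	{
		unsigned entry = first_entry;
		struct scatterlist *sg;
		unsigned i, n;

		for_each_sg(sg_list, sg, sg_len, i) {
			unsigned pages = sg_dma_len(sg) >> PAGE_SHIFT;

			for (n = 0; n < pages; n++, entry++) {
				uint32_t *pt_vaddr = kmap_atomic(
					ppgtt->pt_pages[entry / I915_PPGTT_PT_ENTRIES]);
				dma_addr_t addr = sg_dma_address(sg) +
					((dma_addr_t)n << PAGE_SHIFT);

				/* same PTE encoding as the real loop */
				pt_vaddr[entry % I915_PPGTT_PT_ENTRIES] =
					GEN6_PTE_ADDR_ENCODE(addr) | pte_flags;
				kunmap_atomic(pt_vaddr);
			}
		}
	}
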
+
+static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
+                                   unsigned first_entry, unsigned num_entries,
+                                   struct page **pages, uint32_t pte_flags)
+{
+       uint32_t *pt_vaddr, pte;
+       unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+       unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+       unsigned last_pte, i;
+       dma_addr_t page_addr;
+
+       while (num_entries) {
+               last_pte = first_pte + num_entries;
+               last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
+
+               pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+               for (i = first_pte; i < last_pte; i++) {
+                       page_addr = page_to_phys(*pages);
+                       pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+                       pt_vaddr[i] = pte | pte_flags;
+
+                       pages++;
+               }
+
+               kunmap_atomic(pt_vaddr);
+
+               num_entries -= last_pte - first_pte;
+               first_pte = 0;
+               act_pd++;
+       }
+}
+
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+                           struct drm_i915_gem_object *obj,
+                           enum i915_cache_level cache_level)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t pte_flags = GEN6_PTE_VALID;
+
+       switch (cache_level) {
+       case I915_CACHE_LLC_MLC:
+               pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
+               break;
+       case I915_CACHE_LLC:
+               pte_flags |= GEN6_PTE_CACHE_LLC;
+               break;
+       case I915_CACHE_NONE:
+               pte_flags |= GEN6_PTE_UNCACHED;
+               break;
+       default:
+               BUG();
+       }
+
+       if (dev_priv->mm.gtt->needs_dmar) {
+               BUG_ON(!obj->sg_list);
+
+               i915_ppgtt_insert_sg_entries(ppgtt,
+                                            obj->sg_list,
+                                            obj->num_sg,
+                                            obj->gtt_space->start >> PAGE_SHIFT,
+                                            pte_flags);
+       } else
+               i915_ppgtt_insert_pages(ppgtt,
+                                       obj->gtt_space->start >> PAGE_SHIFT,
+                                       obj->base.size >> PAGE_SHIFT,
+                                       obj->pages,
+                                       pte_flags);
+}
+
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+                             struct drm_i915_gem_object *obj)
+{
+       i915_ppgtt_clear_range(ppgtt,
+                              obj->gtt_space->start >> PAGE_SHIFT,
+                              obj->base.size >> PAGE_SHIFT);
+}
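
Callers are expected to pair these with the obj->has_aliasing_ppgtt_mapping
flag; the unbind side, as wired up in i915_gem_object_unbind() earlier in this
patch:

	if (obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
		obj->has_aliasing_ppgtt_mapping = 0;
	}
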
+
 /* XXX kill agp_type! */
 static unsigned int cache_level_to_agp_type(struct drm_device *dev,
                                            enum i915_cache_level cache_level)
@@ -55,7 +328,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
        if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
-               if (i915_gpu_idle(dev_priv->dev)) {
+               if (i915_gpu_idle(dev_priv->dev, false)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
index 31d334d9d9da8a3b78ef9edb8023d5c0ea6c47f3..1a93066659871eedfcc1e50668cfd82b1c7b4b63 100644 (file)
@@ -93,8 +93,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
        uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
        if (INTEL_INFO(dev)->gen >= 6) {
-               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+               uint32_t dimm_c0, dimm_c1;
+               dimm_c0 = I915_READ(MAD_DIMM_C0);
+               dimm_c1 = I915_READ(MAD_DIMM_C1);
+               dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+               dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+               /* Enable swizzling when the channels are populated with
+                * identically sized DIMMs. We don't need to check the 3rd
+                * channel because no CPU with a GPU attached ships in that
+                * configuration. Also, swizzling only makes sense for 2
+                * channels anyway. */
+               if (dimm_c0 == dimm_c1) {
+                       swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                       swizzle_y = I915_BIT_6_SWIZZLE_9;
+               } else {
+                       swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+               }
        } else if (IS_GEN5(dev)) {
                /* On Ironlake, whatever the DRAM config, the GPU always does
                 * the same swizzling setup.
@@ -107,10 +122,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-       } else if (IS_MOBILE(dev)) {
+       } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
                uint32_t dcc;
 
-               /* On mobile 9xx chipsets, channel interleave by the CPU is
+               /* On 9xx chipsets, channel interleave by the CPU is
                 * determined by DCC.  For single-channel, neither the CPU
                 * nor the GPU do swizzling.  For dual channel interleaved,
                 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
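
For reference, the swizzle modes chosen above name which address bits get
folded into bit 6 of a tiled offset. Expressed as address transforms (a sketch
from the usual definitions, not code from this patch):

	/* I915_BIT_6_SWIZZLE_9_10: bit6 ^= bit9 ^ bit10 */
	addr ^= ((addr >> 3) ^ (addr >> 4)) & 64;
	/* I915_BIT_6_SWIZZLE_9: bit6 ^= bit9 */
	addr ^= (addr >> 3) & 64;
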
index 5bd4361ea84dd2e5e4a0e39a6af249ccd7786573..063b4577d4c6f30513282cbbc461090f019a3eca 100644 (file)
@@ -720,7 +720,6 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
        reloc_offset = src->gtt_offset;
        for (page = 0; page < page_count; page++) {
                unsigned long flags;
-               void __iomem *s;
                void *d;
 
                d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
@@ -728,10 +727,29 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                        goto unwind;
 
                local_irq_save(flags);
-               s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                            reloc_offset);
-               memcpy_fromio(d, s, PAGE_SIZE);
-               io_mapping_unmap_atomic(s);
+               if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+                       void __iomem *s;
+
+                       /* Simply ignore tiling or any overlapping fence.
+                        * It's part of the error state, and this hopefully
+                        * captures what the GPU read.
+                        */
+
+                       s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+                                                    reloc_offset);
+                       memcpy_fromio(d, s, PAGE_SIZE);
+                       io_mapping_unmap_atomic(s);
+               } else {
+                       void *s;
+
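+                       /* Not reachable through the mappable aperture:
+                        * flush CPU caches so the kmap read below sees
+                        * what the GPU wrote, and flush again after the
+                        * copy so the capture leaves the object's cache
+                        * state as it found it. */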
+                       drm_clflush_pages(&src->pages[page], 1);
+
+                       s = kmap_atomic(src->pages[page]);
+                       memcpy(d, s, PAGE_SIZE);
+                       kunmap_atomic(s);
+
+                       drm_clflush_pages(&src->pages[page], 1);
+               }
                local_irq_restore(flags);
 
                dst->pages[page] = d;
@@ -804,7 +822,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
                err->tiling = obj->tiling_mode;
                err->dirty = obj->dirty;
                err->purgeable = obj->madv != I915_MADV_WILLNEED;
-               err->ring = obj->ring ? obj->ring->id : 0;
+               err->ring = obj->ring ? obj->ring->id : -1;
                err->cache_level = obj->cache_level;
 
                if (++i == count)
@@ -876,6 +894,46 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
        return NULL;
 }
 
+static void i915_record_ring_state(struct drm_device *dev,
+                                  struct drm_i915_error_state *error,
+                                  struct intel_ring_buffer *ring)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+               error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+               error->semaphore_mboxes[ring->id][0]
+                       = I915_READ(RING_SYNC_0(ring->mmio_base));
+               error->semaphore_mboxes[ring->id][1]
+                       = I915_READ(RING_SYNC_1(ring->mmio_base));
+       }
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+               error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+               error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+               error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+               if (ring->id == RCS) {
+                       error->instdone1 = I915_READ(INSTDONE1);
+                       error->bbaddr = I915_READ64(BB_ADDR);
+               }
+       } else {
+               error->ipeir[ring->id] = I915_READ(IPEIR);
+               error->ipehr[ring->id] = I915_READ(IPEHR);
+               error->instdone[ring->id] = I915_READ(INSTDONE);
+       }
+
+       error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+       error->seqno[ring->id] = ring->get_seqno(ring);
+       error->acthd[ring->id] = intel_ring_get_active_head(ring);
+       error->head[ring->id] = I915_READ_HEAD(ring);
+       error->tail[ring->id] = I915_READ_TAIL(ring);
+
+       error->cpu_ring_head[ring->id] = ring->head;
+       error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
@@ -900,7 +958,7 @@ static void i915_capture_error_state(struct drm_device *dev)
                return;
 
        /* Account for pipe specific data like PIPE*STAT */
-       error = kmalloc(sizeof(*error), GFP_ATOMIC);
+       error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (!error) {
                DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
                return;
@@ -909,47 +967,22 @@ static void i915_capture_error_state(struct drm_device *dev)
        DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
                 dev->primary->index);
 
-       error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
        for_each_pipe(pipe)
                error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-       error->instpm = I915_READ(INSTPM);
-       error->error = 0;
+
        if (INTEL_INFO(dev)->gen >= 6) {
                error->error = I915_READ(ERROR_GEN6);
-
-               error->bcs_acthd = I915_READ(BCS_ACTHD);
-               error->bcs_ipehr = I915_READ(BCS_IPEHR);
-               error->bcs_ipeir = I915_READ(BCS_IPEIR);
-               error->bcs_instdone = I915_READ(BCS_INSTDONE);
-               error->bcs_seqno = 0;
-               if (dev_priv->ring[BCS].get_seqno)
-                       error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
-
-               error->vcs_acthd = I915_READ(VCS_ACTHD);
-               error->vcs_ipehr = I915_READ(VCS_IPEHR);
-               error->vcs_ipeir = I915_READ(VCS_IPEIR);
-               error->vcs_instdone = I915_READ(VCS_INSTDONE);
-               error->vcs_seqno = 0;
-               if (dev_priv->ring[VCS].get_seqno)
-                       error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
-       }
-       if (INTEL_INFO(dev)->gen >= 4) {
-               error->ipeir = I915_READ(IPEIR_I965);
-               error->ipehr = I915_READ(IPEHR_I965);
-               error->instdone = I915_READ(INSTDONE_I965);
-               error->instps = I915_READ(INSTPS);
-               error->instdone1 = I915_READ(INSTDONE1);
-               error->acthd = I915_READ(ACTHD_I965);
-               error->bbaddr = I915_READ64(BB_ADDR);
-       } else {
-               error->ipeir = I915_READ(IPEIR);
-               error->ipehr = I915_READ(IPEHR);
-               error->instdone = I915_READ(INSTDONE);
-               error->acthd = I915_READ(ACTHD);
-               error->bbaddr = 0;
+               error->done_reg = I915_READ(DONE_REG);
        }
+
+       i915_record_ring_state(dev, error, &dev_priv->ring[RCS]);
+       if (HAS_BLT(dev))
+               i915_record_ring_state(dev, error, &dev_priv->ring[BCS]);
+       if (HAS_BSD(dev))
+               i915_record_ring_state(dev, error, &dev_priv->ring[VCS]);
+
        i915_gem_record_fences(dev, error);
 
        /* Record the active batch and ring buffers */
@@ -1017,11 +1050,12 @@ void i915_destroy_error_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
+       unsigned long flags;
 
-       spin_lock(&dev_priv->error_lock);
+       spin_lock_irqsave(&dev_priv->error_lock, flags);
        error = dev_priv->first_error;
        dev_priv->first_error = NULL;
-       spin_unlock(&dev_priv->error_lock);
+       spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
        if (error)
                i915_error_state_free(dev, error);
@@ -1698,6 +1732,7 @@ void i915_hangcheck_elapsed(unsigned long data)
            dev_priv->last_instdone1 == instdone1) {
                if (dev_priv->hangcheck_count++ > 1) {
                        DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+                       i915_handle_error(dev, true);
 
                        if (!IS_GEN2(dev)) {
                                /* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -1705,7 +1740,6 @@ void i915_hangcheck_elapsed(unsigned long data)
                                 * and break the hang. This should work on
                                 * all but the second generation chipsets.
                                 */
-
                                if (kick_ring(&dev_priv->ring[RCS]))
                                        goto repeat;
 
@@ -1718,7 +1752,6 @@ void i915_hangcheck_elapsed(unsigned long data)
                                        goto repeat;
                        }
 
-                       i915_handle_error(dev, true);
                        return;
                }
        } else {
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
deleted file mode 100644 (file)
index cc8f6d4..0000000
+++ /dev/null
@@ -1,387 +0,0 @@
-/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-/* This memory manager is integrated into the global/local lru
- * mechanisms used by the clients.  Specifically, it operates by
- * setting the 'in_use' fields of the global LRU to indicate whether
- * this region is privately allocated to a client.
- *
- * This does require the client to actually respect that field.
- *
- * Currently no effort is made to allocate 'private' memory in any
- * clever way - the LRU information isn't used to determine which
- * block to allocate, and the ring is drained prior to allocations --
- * in other words allocation is expensive.
- */
-static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv;
-       struct drm_tex_region *list;
-       unsigned shift, nr;
-       unsigned start;
-       unsigned end;
-       unsigned i;
-       int age;
-
-       shift = dev_priv->tex_lru_log_granularity;
-       nr = I915_NR_TEX_REGIONS;
-
-       start = p->start >> shift;
-       end = (p->start + p->size - 1) >> shift;
-
-       age = ++sarea_priv->texAge;
-       list = sarea_priv->texList;
-
-       /* Mark the regions with the new flag and update their age.  Move
-        * them to head of list to preserve LRU semantics.
-        */
-       for (i = start; i <= end; i++) {
-               list[i].in_use = in_use;
-               list[i].age = age;
-
-               /* remove_from_list(i)
-                */
-               list[(unsigned)list[i].next].prev = list[i].prev;
-               list[(unsigned)list[i].prev].next = list[i].next;
-
-               /* insert_at_head(list, i)
-                */
-               list[i].prev = nr;
-               list[i].next = list[nr].next;
-               list[(unsigned)list[nr].next].prev = i;
-               list[nr].next = i;
-       }
-}
-
-/* Very simple allocator for agp memory, working on a static range
- * already mapped into each client's address space.
- */
-
-static struct mem_block *split_block(struct mem_block *p, int start, int size,
-                                    struct drm_file *file_priv)
-{
-       /* Maybe cut off the start of an existing block */
-       if (start > p->start) {
-               struct mem_block *newblock = kmalloc(sizeof(*newblock),
-                                                    GFP_KERNEL);
-               if (!newblock)
-                       goto out;
-               newblock->start = start;
-               newblock->size = p->size - (start - p->start);
-               newblock->file_priv = NULL;
-               newblock->next = p->next;
-               newblock->prev = p;
-               p->next->prev = newblock;
-               p->next = newblock;
-               p->size -= newblock->size;
-               p = newblock;
-       }
-
-       /* Maybe cut off the end of an existing block */
-       if (size < p->size) {
-               struct mem_block *newblock = kmalloc(sizeof(*newblock),
-                                                    GFP_KERNEL);
-               if (!newblock)
-                       goto out;
-               newblock->start = start + size;
-               newblock->size = p->size - size;
-               newblock->file_priv = NULL;
-               newblock->next = p->next;
-               newblock->prev = p;
-               p->next->prev = newblock;
-               p->next = newblock;
-               p->size = size;
-       }
-
-      out:
-       /* Our block is in the middle */
-       p->file_priv = file_priv;
-       return p;
-}
-
-static struct mem_block *alloc_block(struct mem_block *heap, int size,
-                                    int align2, struct drm_file *file_priv)
-{
-       struct mem_block *p;
-       int mask = (1 << align2) - 1;
-
-       for (p = heap->next; p != heap; p = p->next) {
-               int start = (p->start + mask) & ~mask;
-               if (p->file_priv == NULL && start + size <= p->start + p->size)
-                       return split_block(p, start, size, file_priv);
-       }
-
-       return NULL;
-}
-
-static struct mem_block *find_block(struct mem_block *heap, int start)
-{
-       struct mem_block *p;
-
-       for (p = heap->next; p != heap; p = p->next)
-               if (p->start == start)
-                       return p;
-
-       return NULL;
-}
-
-static void free_block(struct mem_block *p)
-{
-       p->file_priv = NULL;
-
-       /* Assumes a single contiguous range.  Needs a special file_priv in
-        * 'heap' to stop it being subsumed.
-        */
-       if (p->next->file_priv == NULL) {
-               struct mem_block *q = p->next;
-               p->size += q->size;
-               p->next = q->next;
-               p->next->prev = p;
-               kfree(q);
-       }
-
-       if (p->prev->file_priv == NULL) {
-               struct mem_block *q = p->prev;
-               q->size += p->size;
-               q->next = p->next;
-               q->next->prev = q;
-               kfree(p);
-       }
-}
-
-/* Initialize.  How to check for an uninitialized heap?
- */
-static int init_heap(struct mem_block **heap, int start, int size)
-{
-       struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
-
-       if (!blocks)
-               return -ENOMEM;
-
-       *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
-       if (!*heap) {
-               kfree(blocks);
-               return -ENOMEM;
-       }
-
-       blocks->start = start;
-       blocks->size = size;
-       blocks->file_priv = NULL;
-       blocks->next = blocks->prev = *heap;
-
-       memset(*heap, 0, sizeof(**heap));
-       (*heap)->file_priv = (struct drm_file *) -1;
-       (*heap)->next = (*heap)->prev = blocks;
-       return 0;
-}
-
-/* Free all blocks associated with the releasing file.
- */
-void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
-                     struct mem_block *heap)
-{
-       struct mem_block *p;
-
-       if (!heap || !heap->next)
-               return;
-
-       for (p = heap->next; p != heap; p = p->next) {
-               if (p->file_priv == file_priv) {
-                       p->file_priv = NULL;
-                       mark_block(dev, p, 0);
-               }
-       }
-
-       /* Assumes a single contiguous range.  Needs a special file_priv in
-        * 'heap' to stop it being subsumed.
-        */
-       for (p = heap->next; p != heap; p = p->next) {
-               while (p->file_priv == NULL && p->next->file_priv == NULL) {
-                       struct mem_block *q = p->next;
-                       p->size += q->size;
-                       p->next = q->next;
-                       p->next->prev = p;
-                       kfree(q);
-               }
-       }
-}
-
-/* Shutdown.
- */
-void i915_mem_takedown(struct mem_block **heap)
-{
-       struct mem_block *p;
-
-       if (!*heap)
-               return;
-
-       for (p = (*heap)->next; p != *heap;) {
-               struct mem_block *q = p;
-               p = p->next;
-               kfree(q);
-       }
-
-       kfree(*heap);
-       *heap = NULL;
-}
-
-static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
-{
-       switch (region) {
-       case I915_MEM_REGION_AGP:
-               return &dev_priv->agp_heap;
-       default:
-               return NULL;
-       }
-}
-
-/* IOCTL HANDLERS */
-
-int i915_mem_alloc(struct drm_device *dev, void *data,
-                  struct drm_file *file_priv)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_mem_alloc_t *alloc = data;
-       struct mem_block *block, **heap;
-
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
-
-       heap = get_heap(dev_priv, alloc->region);
-       if (!heap || !*heap)
-               return -EFAULT;
-
-       /* Make things easier on ourselves: all allocations at least
-        * 4k aligned.
-        */
-       if (alloc->alignment < 12)
-               alloc->alignment = 12;
-
-       block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
-
-       if (!block)
-               return -ENOMEM;
-
-       mark_block(dev, block, 1);
-
-       if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
-                            sizeof(int))) {
-               DRM_ERROR("copy_to_user\n");
-               return -EFAULT;
-       }
-
-       return 0;
-}
-
-int i915_mem_free(struct drm_device *dev, void *data,
-                 struct drm_file *file_priv)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_mem_free_t *memfree = data;
-       struct mem_block *block, **heap;
-
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
-
-       heap = get_heap(dev_priv, memfree->region);
-       if (!heap || !*heap)
-               return -EFAULT;
-
-       block = find_block(*heap, memfree->region_offset);
-       if (!block)
-               return -EFAULT;
-
-       if (block->file_priv != file_priv)
-               return -EPERM;
-
-       mark_block(dev, block, 0);
-       free_block(block);
-       return 0;
-}
-
-int i915_mem_init_heap(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_mem_init_heap_t *initheap = data;
-       struct mem_block **heap;
-
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
-
-       heap = get_heap(dev_priv, initheap->region);
-       if (!heap)
-               return -EFAULT;
-
-       if (*heap) {
-               DRM_ERROR("heap already initialized?");
-               return -EFAULT;
-       }
-
-       return init_heap(heap, initheap->start, initheap->size);
-}
-
-int i915_mem_destroy_heap(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_mem_destroy_heap_t *destroyheap = data;
-       struct mem_block **heap;
-
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
-
-       heap = get_heap(dev_priv, destroyheap->region);
-       if (!heap) {
-               DRM_ERROR("get_heap failed");
-               return -EFAULT;
-       }
-
-       if (!*heap) {
-               DRM_ERROR("heap not initialized?");
-               return -EFAULT;
-       }
-
-       i915_mem_takedown(heap);
-       return 0;
-}
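
The whole of i915_mem.c goes away: it implemented the legacy UMS-era AGP heap that user space drove through the alloc/free/init_heap/destroy_heap ioctls, long superseded by GEM. For reference, the core of the deleted code is a first-fit allocator over a doubly linked list of blocks, splitting a block on allocation and coalescing free neighbours on release. A self-contained toy version of that scheme (simplified: no alignment, no file_priv ownership, error handling omitted):

#include <stdio.h>
#include <stdlib.h>

struct block {
        int start, size, used;
        struct block *prev, *next;
};

static struct block heap;               /* sentinel node */

static void heap_init(int start, int size)
{
        struct block *b = malloc(sizeof(*b));
        b->start = start; b->size = size; b->used = 0;
        b->prev = b->next = &heap;
        heap.used = 1;                  /* sentinel never merges */
        heap.prev = heap.next = b;
}

static struct block *alloc_block(int size)
{
        struct block *p;
        for (p = heap.next; p != &heap; p = p->next) {
                if (p->used || p->size < size)
                        continue;
                if (p->size > size) {   /* split off the free tail */
                        struct block *q = malloc(sizeof(*q));
                        q->start = p->start + size;
                        q->size = p->size - size;
                        q->used = 0;
                        q->prev = p; q->next = p->next;
                        p->next->prev = q; p->next = q;
                        p->size = size;
                }
                p->used = 1;
                return p;
        }
        return NULL;
}

static void free_block(struct block *p)
{
        p->used = 0;
        if (!p->next->used) {           /* merge with successor */
                struct block *q = p->next;
                p->size += q->size;
                p->next = q->next; q->next->prev = p;
                free(q);
        }
        if (!p->prev->used) {           /* merge with predecessor */
                struct block *q = p->prev;
                q->size += p->size;
                q->next = p->next; p->next->prev = q;
                free(p);
        }
}

int main(void)
{
        heap_init(0, 1024);
        struct block *a = alloc_block(256);
        struct block *b = alloc_block(128);
        printf("a at %d, b at %d\n", a->start, b->start);
        free_block(a);
        free_block(b);                  /* coalesces back to one block */
        printf("heap head: start=%d size=%d\n",
               heap.next->start, heap.next->size);
        return 0;
}

The final print shows the heap coalesced back into a single 1024-unit block.
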
index c3afb783cb9d93bf7c79dd32a18d8627ed6bfd0d..341ce44e732dbeaed2ed2d5ffbc718b74a2f7b81 100644 (file)
 #define   GEN6_MBC_SNPCR_LOW   (2<<21)
 #define   GEN6_MBC_SNPCR_MIN   (3<<21) /* only 1/16th of the cache is shared */
 
+#define GEN6_MBCTL             0x0907c
+#define   GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
+#define   GEN6_MBCTL_CTX_FETCH_NEEDED  (1 << 3)
+#define   GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
+#define   GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
+#define   GEN6_MBCTL_BOOT_FETCH_MECH   (1 << 0)
+
 #define GEN6_GDRST     0x941c
 #define  GEN6_GRDOM_FULL               (1 << 0)
 #define  GEN6_GRDOM_RENDER             (1 << 1)
 #define  GEN6_GRDOM_MEDIA              (1 << 2)
 #define  GEN6_GRDOM_BLT                        (1 << 3)
 
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID                 (1 << 0)
+#define GEN6_PDE_LARGE_PAGE            (2 << 0) /* use 32kb pages */
+/* gen6+ uses PTE bits 11:4 for physical address bits 39:32 */
+#define GEN6_PDE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID                 (1 << 0)
+#define GEN6_PTE_UNCACHED              (1 << 1)
+#define GEN6_PTE_CACHE_LLC             (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC         (3 << 1)
+#define GEN6_PTE_CACHE_BITS            (3 << 1)
+#define GEN6_PTE_GFDT                  (1 << 3)
+#define GEN6_PTE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
+
+#define RING_PP_DIR_BASE(ring)         ((ring)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(ring)    ((ring)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(ring)         ((ring)->mmio_base+0x220)
+#define   PP_DIR_DCLV_2G               0xffffffff
+
+#define GAM_ECOCHK                     0x4090
+#define   ECOCHK_SNB_BIT               (1<<10)
+#define   ECOCHK_PPGTT_CACHE64B                (0x3<<3)
+#define   ECOCHK_PPGTT_CACHE4B         (0x0<<3)
+
 /* VGA stuff */
 
 #define VGA_ST01_MDA 0x3ba
 #define FENCE_REG_SANDYBRIDGE_0                0x100000
 #define   SANDYBRIDGE_FENCE_PITCH_SHIFT        32
 
+/* control register for CPU GTT access */
+#define TILECTL                                0x101000
+#define   TILECTL_SWZCTL                       (1 << 0)
+#define   TILECTL_TLB_PREFETCH_DIS     (1 << 2)
+#define   TILECTL_BACKSNOOP_DIS                (1 << 3)
+
 /*
  * Instruction and interrupt control regs
  */
 #define RING_MAX_IDLE(base)    ((base)+0x54)
 #define RING_HWS_PGA(base)     ((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)        ((base)+0x2080)
+#define ARB_MODE               0x04030
+#define   ARB_MODE_SWIZZLE_SNB (1<<4)
+#define   ARB_MODE_SWIZZLE_IVB (1<<5)
+#define   ARB_MODE_ENABLE(x)   GFX_MODE_ENABLE(x)
+#define   ARB_MODE_DISABLE(x)  GFX_MODE_DISABLE(x)
 #define RENDER_HWS_PGA_GEN7    (0x04080)
+#define RING_FAULT_REG(ring)   (0x4094 + 0x100*(ring)->id)
+#define DONE_REG               0x40b0
 #define BSD_HWS_PGA_GEN7       (0x04180)
 #define BLT_HWS_PGA_GEN7       (0x04280)
 #define RING_ACTHD(base)       ((base)+0x74)
 #define IPEIR_I965     0x02064
 #define IPEHR_I965     0x02068
 #define INSTDONE_I965  0x0206c
+#define RING_IPEIR(base)       ((base)+0x64)
+#define RING_IPEHR(base)       ((base)+0x68)
+#define RING_INSTDONE(base)    ((base)+0x6c)
+#define RING_INSTPS(base)      ((base)+0x70)
+#define RING_DMA_FADD(base)    ((base)+0x78)
+#define RING_INSTPM(base)      ((base)+0xc0)
 #define INSTPS         0x02070 /* 965+ only */
 #define INSTDONE1      0x0207c /* 965+ only */
 #define ACTHD_I965     0x02074
 #define INSTDONE       0x02090
 #define NOPID          0x02094
 #define HWSTAM         0x02098
-#define VCS_INSTDONE   0x1206C
-#define VCS_IPEIR      0x12064
-#define VCS_IPEHR      0x12068
-#define VCS_ACTHD      0x12074
-#define BCS_INSTDONE   0x2206C
-#define BCS_IPEIR      0x22064
-#define BCS_IPEHR      0x22068
-#define BCS_ACTHD      0x22074
 
 #define ERROR_GEN6     0x040a0
 
 
 #define MI_MODE                0x0209c
 # define VS_TIMER_DISPATCH                             (1 << 6)
-# define MI_FLUSH_ENABLE                               (1 << 11)
+# define MI_FLUSH_ENABLE                               (1 << 12)
 
 #define GFX_MODE       0x02520
 #define GFX_MODE_GEN7  0x0229c
+#define RING_MODE_GEN7(ring)   ((ring)->mmio_base+0x29c)
 #define   GFX_RUN_LIST_ENABLE          (1<<15)
 #define   GFX_TLB_INVALIDATE_ALWAYS    (1<<13)
 #define   GFX_SURFACE_FAULT_ENABLE     (1<<12)
 #define C0DRB3                 0x10206
 #define C1DRB3                 0x10606
 
+/* SNB MCH registers for reading the DRAM channel configuration */
+#define MAD_DIMM_C0                    (MCHBAR_MIRROR_BASE_SNB + 0x5004)
+#define MAD_DIMM_C1                    (MCHBAR_MIRROR_BASE_SNB + 0x5008)
+#define MAD_DIMM_C2                    (MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define   MAD_DIMM_ECC_MASK            (0x3 << 24)
+#define   MAD_DIMM_ECC_OFF             (0x0 << 24)
+#define   MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
+#define   MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
+#define   MAD_DIMM_ECC_ON              (0x3 << 24)
+#define   MAD_DIMM_ENH_INTERLEAVE      (0x1 << 22)
+#define   MAD_DIMM_RANK_INTERLEAVE     (0x1 << 21)
+#define   MAD_DIMM_B_WIDTH_X16         (0x1 << 20) /* X8 chips if unset */
+#define   MAD_DIMM_A_WIDTH_X16         (0x1 << 19) /* X8 chips if unset */
+#define   MAD_DIMM_B_DUAL_RANK         (0x1 << 18)
+#define   MAD_DIMM_A_DUAL_RANK         (0x1 << 17)
+#define   MAD_DIMM_A_SELECT            (0x1 << 16)
+/* DIMM sizes are in multiples of 256MB. */
+#define   MAD_DIMM_B_SIZE_SHIFT                8
+#define   MAD_DIMM_B_SIZE_MASK         (0xff << MAD_DIMM_B_SIZE_SHIFT)
+#define   MAD_DIMM_A_SIZE_SHIFT                0
+#define   MAD_DIMM_A_SIZE_MASK         (0xff << MAD_DIMM_A_SIZE_SHIFT)
+
+
 /* Clocking configuration register */
 #define CLKCFG                 0x10c00
 #define CLKCFG_FSB_400                                 (5 << 0)        /* hrawclk 100 */
  */
 #define GEN7_SO_WRITE_OFFSET(n)                (0x5280 + (n) * 4)
 
+#define IBX_AUD_CONFIG_A                       0xe2000
+#define CPT_AUD_CONFIG_A                       0xe5000
+#define   AUD_CONFIG_N_VALUE_INDEX             (1 << 29)
+#define   AUD_CONFIG_N_PROG_ENABLE             (1 << 28)
+#define   AUD_CONFIG_UPPER_N_SHIFT             20
+#define   AUD_CONFIG_UPPER_N_VALUE             (0xff << 20)
+#define   AUD_CONFIG_LOWER_N_SHIFT             4
+#define   AUD_CONFIG_LOWER_N_VALUE             (0xfff << 4)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT    16
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI          (0xf << 16)
+#define   AUD_CONFIG_DISABLE_NCTS              (1 << 3)
+
 #endif /* _I915_REG_H_ */
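
The new PPGTT defines above pack a 40-bit physical address into a 32-bit page-table entry: bits 31:12 hold the page address as usual, while GEN6_GTT_ADDR_ENCODE() folds address bits 39:32 into PTE bits 11:4, which a 4KiB-aligned address never needs for itself (bits 3:0 stay free for the valid/cache flags). A worked standalone example using only the macros added above:

#include <stdint.h>
#include <stdio.h>

#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_VALID             (1 << 0)
#define GEN6_PTE_CACHE_LLC         (2 << 1)

int main(void)
{
        uint64_t phys = 0xAB12345000ULL;   /* 40-bit, 4KiB-aligned */
        uint32_t pte = (uint32_t)(GEN6_GTT_ADDR_ENCODE(phys)
                                  | GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID);

        /* bits 31:12 = address bits 31:12, bits 11:4 = address bits 39:32 */
        printf("pte = 0x%08x\n", pte);     /* prints 0x12345ab5 */
        return 0;
}
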
index cb912106d1a2c03bd158156e89c4eb8b07b255f6..bae3edf956a444f1e39e30e39867db75ef33303d 100644 (file)
@@ -208,7 +208,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
 
        ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
        if (ret < 0) {
-               DRM_ERROR("failed to get supported _DSM functions\n");
+               DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
                return false;
        }
 
index 63880e2e5cfd4c2028f9933081606b57e2d5380f..50656339d922c31f17dafbf97890e98b7028d14e 100644 (file)
@@ -572,7 +572,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
                DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
                return;
        }
-       dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
+       dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
        if (!dev_priv->child_dev) {
                DRM_DEBUG_KMS("No memory space for child device\n");
                return;
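
kcalloc(count, size, ...) is preferred over kzalloc(size * count, ...) because it fails cleanly when the multiplication would overflow, and count here comes straight from VBT data, i.e. from the vendor BIOS image. A userspace analogue of the same hardening (common libcs perform the identical overflow check in calloc()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t count = SIZE_MAX / 4 + 1, size = 8;

        /* the product wraps to zero, so malloc may "succeed" with a
         * buffer far smaller than count * size bytes */
        void *unchecked = malloc(count * size);
        /* calloc checks the multiplication and returns NULL instead */
        void *checked = calloc(count, size);

        printf("malloc(wrapped)=%p calloc=%p\n", unchecked, checked);
        free(unchecked);
        free(checked);
        return 0;
}
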
index 00fbff5ddd814b3ae73a4f7abf936274f3b06111..efe56a2c4f4b88be123078a3be8501e1e1e74d85 100644 (file)
@@ -75,7 +75,7 @@ struct intel_limit {
        intel_range_t   dot, vco, n, m, m1, m2, p, p1;
        intel_p2_t          p2;
        bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
-                       int, int, intel_clock_t *);
+                       int, int, intel_clock_t *, intel_clock_t *);
 };
 
 /* FDI */
@@ -83,17 +83,21 @@ struct intel_limit {
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                   int target, int refclk, intel_clock_t *best_clock);
+                   int target, int refclk, intel_clock_t *match_clock,
+                   intel_clock_t *best_clock);
 static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                       int target, int refclk, intel_clock_t *best_clock);
+                       int target, int refclk, intel_clock_t *match_clock,
+                       intel_clock_t *best_clock);
 
 static bool
 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
-                     int target, int refclk, intel_clock_t *best_clock);
+                     int target, int refclk, intel_clock_t *match_clock,
+                     intel_clock_t *best_clock);
 static bool
 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
-                          int target, int refclk, intel_clock_t *best_clock);
+                          int target, int refclk, intel_clock_t *match_clock,
+                          intel_clock_t *best_clock);
 
 static inline u32 /* units of 100MHz */
 intel_fdi_link_freq(struct drm_device *dev)
@@ -515,7 +519,8 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                   int target, int refclk, intel_clock_t *best_clock)
+                   int target, int refclk, intel_clock_t *match_clock,
+                   intel_clock_t *best_clock)
 
 {
        struct drm_device *dev = crtc->dev;
@@ -562,6 +567,9 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
+                                       if (match_clock &&
+                                           clock.p != match_clock->p)
+                                               continue;
 
                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
@@ -578,7 +586,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 
 static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                       int target, int refclk, intel_clock_t *best_clock)
+                       int target, int refclk, intel_clock_t *match_clock,
+                       intel_clock_t *best_clock)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -625,6 +634,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
+                                       if (match_clock &&
+                                           clock.p != match_clock->p)
+                                               continue;
 
                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
@@ -642,7 +654,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 
 static bool
 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
-                          int target, int refclk, intel_clock_t *best_clock)
+                          int target, int refclk, intel_clock_t *match_clock,
+                          intel_clock_t *best_clock)
 {
        struct drm_device *dev = crtc->dev;
        intel_clock_t clock;
@@ -668,7 +681,8 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
 static bool
 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
-                     int target, int refclk, intel_clock_t *best_clock)
+                     int target, int refclk, intel_clock_t *match_clock,
+                     intel_clock_t *best_clock)
 {
        intel_clock_t clock;
        if (target < 200000) {
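
All four find_pll() variants gain a match_clock parameter, replacing the after-the-fact clock.p != reduced_clock.p check in the callers (see the i9xx and ironlake mode_set hunks below): the LVDS downclock search can now reject non-matching P dividers inside the search loop itself. A condensed standalone illustration of the constrained search, with simplified types (dot/p only; names are local to this sketch):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct clk { int dot, p; };

/* match == NULL: unconstrained search.  Otherwise only candidates whose
 * P equals the already-chosen clock's P are eligible, so FP0/FP1 can be
 * swapped at runtime without reprogramming the P divider. */
static int find_pll(int target, const struct clk *c, int n,
                    const struct clk *match, struct clk *best)
{
        int err = INT_MAX, found = 0;

        for (int i = 0; i < n; i++) {
                if (match && c[i].p != match->p)
                        continue;
                int e = abs(c[i].dot - target);
                if (e < err) {
                        err = e;
                        *best = c[i];
                        found = 1;
                }
        }
        return found;
}

int main(void)
{
        struct clk cands[] = { {96000, 2}, {97000, 4}, {65000, 2} };
        struct clk mode_clk, down_clk;

        find_pll(96500, cands, 3, NULL, &mode_clk);     /* picks {96000, 2} */
        if (find_pll(65000, cands, 3, &mode_clk, &down_clk))
                printf("downclock dot=%d p=%d\n", down_clk.dot, down_clk.p);
        else
                printf("no matching P, LVDS downclock disabled\n");
        return 0;
}
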
@@ -922,6 +936,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
        u32 val;
        bool cur_state;
 
+       /* if we need the pipe A quirk it must always be on */
+       if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+               state = true;
+
        reg = PIPECONF(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & PIPECONF_ENABLE);
@@ -930,19 +948,24 @@ void assert_pipe(struct drm_i915_private *dev_priv,
             pipe_name(pipe), state_string(state), state_string(cur_state));
 }
 
-static void assert_plane_enabled(struct drm_i915_private *dev_priv,
-                                enum plane plane)
+static void assert_plane(struct drm_i915_private *dev_priv,
+                        enum plane plane, bool state)
 {
        int reg;
        u32 val;
+       bool cur_state;
 
        reg = DSPCNTR(plane);
        val = I915_READ(reg);
-       WARN(!(val & DISPLAY_PLANE_ENABLE),
-            "plane %c assertion failure, should be active but is disabled\n",
-            plane_name(plane));
+       cur_state = !!(val & DISPLAY_PLANE_ENABLE);
+       WARN(cur_state != state,
+            "plane %c assertion failure (expected %s, current %s)\n",
+            plane_name(plane), state_string(state), state_string(cur_state));
 }
 
+#define assert_plane_enabled(d, p) assert_plane(d, p, true)
+#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+
 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe)
 {
@@ -951,8 +974,14 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
        int cur_pipe;
 
        /* Planes are fixed to pipes on ILK+ */
-       if (HAS_PCH_SPLIT(dev_priv->dev))
+       if (HAS_PCH_SPLIT(dev_priv->dev)) {
+               reg = DSPCNTR(pipe);
+               val = I915_READ(reg);
+               WARN((val & DISPLAY_PLANE_ENABLE),
+                    "plane %c assertion failure, should be disabled but not\n",
+                    plane_name(pipe));
                return;
+       }
 
        /* Need to check both planes against the pipe */
        for (i = 0; i < 2; i++) {
@@ -1071,7 +1100,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
 {
        u32 val = I915_READ(reg);
        WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
-            "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+            "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
 }
 
@@ -2012,6 +2041,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
                ret = i915_gem_object_get_fence(obj, pipelined);
                if (ret)
                        goto err_unpin;
+
+               i915_gem_object_pin_fence(obj);
        }
 
        dev_priv->mm.interruptible = true;
@@ -2024,6 +2055,12 @@ err_interruptible:
        return ret;
 }
 
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+{
+       i915_gem_object_unpin_fence(obj);
+       i915_gem_object_unpin(obj);
+}
+
 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                             int x, int y)
 {
@@ -2255,7 +2292,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
                                         LEAVE_ATOMIC_MODE_SET);
        if (ret) {
-               i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+               intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
                mutex_unlock(&dev->struct_mutex);
                DRM_ERROR("failed to update base address\n");
                return ret;
@@ -2263,7 +2300,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
        if (old_fb) {
                intel_wait_for_vblank(dev, intel_crtc->pipe);
-               i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+               intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -3321,10 +3358,12 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
 
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+       assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+       assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
 
        if (crtc->fb) {
                mutex_lock(&dev->struct_mutex);
-               i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+               intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
                mutex_unlock(&dev->struct_mutex);
        }
 }
@@ -4521,6 +4560,7 @@ void sandybridge_update_wm(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
+       u32 val;
        int fbc_wm, plane_wm, cursor_wm;
        unsigned int enabled;
 
@@ -4529,8 +4569,10 @@ void sandybridge_update_wm(struct drm_device *dev)
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
-               I915_WRITE(WM0_PIPEA_ILK,
-                          (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+               val = I915_READ(WM0_PIPEA_ILK);
+               val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+               I915_WRITE(WM0_PIPEA_ILK, val |
+                          ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
@@ -4541,8 +4583,10 @@ void sandybridge_update_wm(struct drm_device *dev)
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
-               I915_WRITE(WM0_PIPEB_ILK,
-                          (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+               val = I915_READ(WM0_PIPEB_ILK);
+               val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+               I915_WRITE(WM0_PIPEB_ILK, val |
+                          ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
@@ -4555,8 +4599,10 @@ void sandybridge_update_wm(struct drm_device *dev)
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
-               I915_WRITE(WM0_PIPEC_IVB,
-                          (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+               val = I915_READ(WM0_PIPEC_IVB);
+               val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+               I915_WRITE(WM0_PIPEC_IVB, val |
+                          ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
                DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
@@ -4700,6 +4746,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
+       u32 val;
        int sprite_wm, reg;
        int ret;
 
@@ -4726,7 +4773,9 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
                return;
        }
 
-       I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+       val = I915_READ(reg);
+       val &= ~WM0_PIPE_SPRITE_MASK;
+       I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
        DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
 
 
@@ -4968,6 +5017,82 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
        return display_bpc != bpc;
 }
 
+static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int refclk;
+
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+           intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+               refclk = dev_priv->lvds_ssc_freq * 1000;
+               DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+                             refclk / 1000);
+       } else if (!IS_GEN2(dev)) {
+               refclk = 96000;
+       } else {
+               refclk = 48000;
+       }
+
+       return refclk;
+}
+
+static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
+                                     intel_clock_t *clock)
+{
+       /* SDVO TV has fixed PLL values that depend on its clock range;
+          this mirrors the VBIOS setting. */
+       if (adjusted_mode->clock >= 100000
+           && adjusted_mode->clock < 140500) {
+               clock->p1 = 2;
+               clock->p2 = 10;
+               clock->n = 3;
+               clock->m1 = 16;
+               clock->m2 = 8;
+       } else if (adjusted_mode->clock >= 140500
+                  && adjusted_mode->clock <= 200000) {
+               clock->p1 = 1;
+               clock->p2 = 10;
+               clock->n = 6;
+               clock->m1 = 12;
+               clock->m2 = 8;
+       }
+}
+
+static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
+                                    intel_clock_t *clock,
+                                    intel_clock_t *reduced_clock)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       u32 fp, fp2 = 0;
+
+       if (IS_PINEVIEW(dev)) {
+               fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
+               if (reduced_clock)
+                       fp2 = (1 << reduced_clock->n) << 16 |
+                               reduced_clock->m1 << 8 | reduced_clock->m2;
+       } else {
+               fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
+               if (reduced_clock)
+                       fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
+                               reduced_clock->m2;
+       }
+
+       I915_WRITE(FP0(pipe), fp);
+
+       intel_crtc->lowfreq_avail = false;
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+           reduced_clock && i915_powersave) {
+               I915_WRITE(FP1(pipe), fp2);
+               intel_crtc->lowfreq_avail = true;
+       } else {
+               I915_WRITE(FP1(pipe), fp);
+       }
+}
+
 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                              struct drm_display_mode *mode,
                              struct drm_display_mode *adjusted_mode,
@@ -4981,7 +5106,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        int plane = intel_crtc->plane;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
-       u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+       u32 dpll, dspcntr, pipeconf;
        bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
        bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
        struct drm_mode_config *mode_config = &dev->mode_config;
@@ -5022,15 +5147,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                num_connectors++;
        }
 
-       if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
-               refclk = dev_priv->lvds_ssc_freq * 1000;
-               DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
-                             refclk / 1000);
-       } else if (!IS_GEN2(dev)) {
-               refclk = 96000;
-       } else {
-               refclk = 48000;
-       }
+       refclk = i9xx_get_refclk(crtc, num_connectors);
 
        /*
         * Returns a set of divisors for the desired target clock with the given
@@ -5038,7 +5155,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
         * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
         */
        limit = intel_limit(crtc, refclk);
-       ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
+       ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+                            &clock);
        if (!ok) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
@@ -5048,53 +5166,24 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        intel_crtc_update_cursor(crtc, true);
 
        if (is_lvds && dev_priv->lvds_downclock_avail) {
+               /*
+                * Ensure we match the reduced clock's P to the target clock.
+                * If the clocks don't match, we can't switch the display clock
+                * by using the FP0/FP1. In such a case we will disable the
+                * LVDS downclock feature.
+                */
                has_reduced_clock = limit->find_pll(limit, crtc,
                                                    dev_priv->lvds_downclock,
                                                    refclk,
+                                                   &clock,
                                                    &reduced_clock);
-               if (has_reduced_clock && (clock.p != reduced_clock.p)) {
-                       /*
-                        * If the different P is found, it means that we can't
-                        * switch the display clock by using the FP0/FP1.
-                        * In such case we will disable the LVDS downclock
-                        * feature.
-                        */
-                       DRM_DEBUG_KMS("Different P is found for "
-                                     "LVDS clock/downclock\n");
-                       has_reduced_clock = 0;
-               }
-       }
-       /* SDVO TV has fixed PLL values depend on its clock range,
-          this mirrors vbios setting. */
-       if (is_sdvo && is_tv) {
-               if (adjusted_mode->clock >= 100000
-                   && adjusted_mode->clock < 140500) {
-                       clock.p1 = 2;
-                       clock.p2 = 10;
-                       clock.n = 3;
-                       clock.m1 = 16;
-                       clock.m2 = 8;
-               } else if (adjusted_mode->clock >= 140500
-                          && adjusted_mode->clock <= 200000) {
-                       clock.p1 = 1;
-                       clock.p2 = 10;
-                       clock.n = 6;
-                       clock.m1 = 12;
-                       clock.m2 = 8;
-               }
        }
 
-       if (IS_PINEVIEW(dev)) {
-               fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
-               if (has_reduced_clock)
-                       fp2 = (1 << reduced_clock.n) << 16 |
-                               reduced_clock.m1 << 8 | reduced_clock.m2;
-       } else {
-               fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-               if (has_reduced_clock)
-                       fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
-                               reduced_clock.m2;
-       }
+       if (is_sdvo && is_tv)
+               i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+
+       i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
+                                &reduced_clock : NULL);
 
        dpll = DPLL_VGA_MODE_DIS;
 
@@ -5168,8 +5257,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        /* Set up the display plane register */
        dspcntr = DISPPLANE_GAMMA_ENABLE;
 
-       /* Ironlake's plane is forced to pipe, bit 24 is to
-          enable color space conversion */
        if (pipe == 0)
                dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
        else
@@ -5204,7 +5291,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
        drm_mode_debug_printmodeline(mode);
 
-       I915_WRITE(FP0(pipe), fp);
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
 
        POSTING_READ(DPLL(pipe));
@@ -5291,17 +5377,11 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                I915_WRITE(DPLL(pipe), dpll);
        }
 
-       intel_crtc->lowfreq_avail = false;
-       if (is_lvds && has_reduced_clock && i915_powersave) {
-               I915_WRITE(FP1(pipe), fp2);
-               intel_crtc->lowfreq_avail = true;
-               if (HAS_PIPE_CXSR(dev)) {
+       if (HAS_PIPE_CXSR(dev)) {
+               if (intel_crtc->lowfreq_avail) {
                        DRM_DEBUG_KMS("enabling CxSR downclocking\n");
                        pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
-               }
-       } else {
-               I915_WRITE(FP1(pipe), fp);
-               if (HAS_PIPE_CXSR(dev)) {
+               } else {
                        DRM_DEBUG_KMS("disabling CxSR downclocking\n");
                        pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
                }
@@ -5584,7 +5664,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
         * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
         */
        limit = intel_limit(crtc, refclk);
-       ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
+       ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+                            &clock);
        if (!ok) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
@@ -5594,21 +5675,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        intel_crtc_update_cursor(crtc, true);
 
        if (is_lvds && dev_priv->lvds_downclock_avail) {
+               /*
+                * Ensure we match the reduced clock's P to the target clock.
+                * If the clocks don't match, we can't switch the display clock
+                * by using the FP0/FP1. In such a case we will disable the
+                * LVDS downclock feature.
+                */
                has_reduced_clock = limit->find_pll(limit, crtc,
                                                    dev_priv->lvds_downclock,
                                                    refclk,
+                                                   &clock,
                                                    &reduced_clock);
-               if (has_reduced_clock && (clock.p != reduced_clock.p)) {
-                       /*
-                        * If the different P is found, it means that we can't
-                        * switch the display clock by using the FP0/FP1.
-                        * In such case we will disable the LVDS downclock
-                        * feature.
-                        */
-                       DRM_DEBUG_KMS("Different P is found for "
-                                     "LVDS clock/downclock\n");
-                       has_reduced_clock = 0;
-               }
        }
        /* SDVO TV has fixed PLL values depend on its clock range,
           this mirrors vbios setting. */
@@ -5957,12 +6034,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
        intel_wait_for_vblank(dev, pipe);
 
-       if (IS_GEN5(dev)) {
-               /* enable address swizzle for tiling buffer */
-               temp = I915_READ(DISP_ARB_CTL);
-               I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
-       }
-
        I915_WRITE(DSPCNTR(plane), dspcntr);
        POSTING_READ(DSPCNTR(plane));
 
@@ -6077,15 +6148,18 @@ static void ironlake_write_eld(struct drm_connector *connector,
        uint32_t i;
        int len;
        int hdmiw_hdmiedid;
+       int aud_config;
        int aud_cntl_st;
        int aud_cntrl_st2;
 
        if (HAS_PCH_IBX(connector->dev)) {
                hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+               aud_config = IBX_AUD_CONFIG_A;
                aud_cntl_st = IBX_AUD_CNTL_ST_A;
                aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
        } else {
                hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+               aud_config = CPT_AUD_CONFIG_A;
                aud_cntl_st = CPT_AUD_CNTL_ST_A;
                aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
        }
@@ -6093,6 +6167,7 @@ static void ironlake_write_eld(struct drm_connector *connector,
        i = to_intel_crtc(crtc)->pipe;
        hdmiw_hdmiedid += i * 0x100;
        aud_cntl_st += i * 0x100;
+       aud_config += i * 0x100;
 
        DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
 
@@ -6112,7 +6187,9 @@ static void ironlake_write_eld(struct drm_connector *connector,
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
                DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
                eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
-       }
+               I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+       } else
+               I915_WRITE(aud_config, 0);
 
        if (intel_eld_uptodate(connector,
                               aud_cntrl_st2, eldv,
@@ -7088,7 +7165,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
                container_of(__work, struct intel_unpin_work, work);
 
        mutex_lock(&work->dev->struct_mutex);
-       i915_gem_object_unpin(work->old_fb_obj);
+       intel_unpin_fb_obj(work->old_fb_obj);
        drm_gem_object_unreference(&work->pending_flip_obj->base);
        drm_gem_object_unreference(&work->old_fb_obj->base);
 
@@ -7238,7 +7315,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset + offset);
-       OUT_RING(MI_NOOP);
+       OUT_RING(0); /* aux display base address, unused */
        ADVANCE_LP_RING();
 out:
        return ret;
@@ -7830,7 +7907,8 @@ int intel_framebuffer_init(struct drm_device *dev,
        case DRM_FORMAT_VYUY:
                break;
        default:
-               DRM_ERROR("unsupported pixel format\n");
+               DRM_DEBUG_KMS("unsupported pixel format %u\n",
+                               mode_cmd->pixel_format);
                return -EINVAL;
        }
 
index 94f860cce3f748f4c89a080cc32b561cfd0fae40..39eccf908a69973738d3570455123e56803611b7 100644 (file)
@@ -352,7 +352,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        int recv_bytes;
        uint32_t status;
        uint32_t aux_clock_divider;
-       int try, precharge;
+       int try, precharge = 5;
 
        intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk,
@@ -368,15 +368,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                else
                        aux_clock_divider = 225; /* eDP input clock at 450Mhz */
        } else if (HAS_PCH_SPLIT(dev))
-               aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
+               aux_clock_divider = 63; /* IRL input clock fixed at 125MHz */
        else
                aux_clock_divider = intel_hrawclk(dev) / 2;
 
-       if (IS_GEN6(dev))
-               precharge = 3;
-       else
-               precharge = 5;
-
        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ(ch_ctl);
@@ -421,6 +416,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                           DP_AUX_CH_CTL_DONE |
                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                           DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+               if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+                             DP_AUX_CH_CTL_RECEIVE_ERROR))
+                       continue;
                if (status & DP_AUX_CH_CTL_DONE)
                        break;
        }
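
Besides the fixed precharge and divider (the AUX channel wants a roughly 2MHz clock, so the fixed 125MHz input presumably rounds up to 63 rather than truncating to 62), the loop above now treats timeout and receive errors as retryable instead of only watching for DONE. The shape of that retry loop, with a stub standing in for the status register (the bit names here are local to the sketch, not the driver's defines):

#include <stdint.h>
#include <stdio.h>

#define AUX_DONE     (1u << 30)
#define AUX_TIMEOUT  (1u << 28)
#define AUX_RX_ERROR (1u << 25)

static uint32_t fake_xfer(int attempt)
{
        return attempt < 2 ? AUX_RX_ERROR : AUX_DONE;  /* succeed 3rd try */
}

int main(void)
{
        uint32_t status = 0;

        for (int try = 0; try < 5; try++) {
                status = fake_xfer(try);
                if (status & (AUX_TIMEOUT | AUX_RX_ERROR))
                        continue;               /* transient: retry */
                if (status & AUX_DONE)
                        break;
        }
        printf("%s\n", (status & AUX_DONE) ? "done" : "gave up");
        return 0;
}
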
index 1348705faf6bfbe5abf4caf0718107fa9ff85ee3..9cec6c3937faef852f036ee5134cec067367e52d 100644 (file)
@@ -374,6 +374,7 @@ extern void intel_init_emon(struct drm_device *dev);
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                                      struct drm_i915_gem_object *obj,
                                      struct intel_ring_buffer *pipelined);
+extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
 
 extern int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
index cdf17d4cc1f79d111e67ad66d4a8a64ce9c8c7bd..23a543cdfa99286cf62348ed7d3df0f7ed95bdb6 100644 (file)
@@ -227,7 +227,8 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
        }
        overlay->last_flip_req = request->seqno;
        overlay->flip_tail = tail;
-       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+                               true);
        if (ret)
                return ret;
 
@@ -448,7 +449,8 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
        if (overlay->last_flip_req == 0)
                return 0;
 
-       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+                               true);
        if (ret)
                return ret;
 
index 04d79fd1dc9d9e1477ee2592ac0fb2212883e180..c935cdaa2154facc89b69c35cc49032192702a5f 100644 (file)
@@ -141,8 +141,8 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
                        dev_priv->saveBLC_PWM_CTL2 = val;
                } else if (val == 0) {
                        I915_WRITE(BLC_PWM_PCH_CTL2,
-                                  dev_priv->saveBLC_PWM_CTL);
-                       val = dev_priv->saveBLC_PWM_CTL;
+                                  dev_priv->saveBLC_PWM_CTL2);
+                       val = dev_priv->saveBLC_PWM_CTL2;
                }
        } else {
                val = I915_READ(BLC_PWM_CTL);
index 1ab842c6032e949a37855a3995aa161d9f276977..4956f1bff5226e697f0ed63a94d80d57f20c6977 100644 (file)
@@ -399,8 +399,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 
        if (INTEL_INFO(dev)->gen > 3) {
                int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
-               if (IS_GEN6(dev) || IS_GEN7(dev))
-                       mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
                I915_WRITE(MI_MODE, mode);
                if (IS_GEN7(dev))
                        I915_WRITE(GFX_MODE_GEN7,
@@ -744,13 +742,13 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
         */
        if (IS_GEN7(dev)) {
                switch (ring->id) {
-               case RING_RENDER:
+               case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
-               case RING_BLT:
+               case BCS:
                        mmio = BLT_HWS_PGA_GEN7;
                        break;
-               case RING_BSD:
+               case VCS:
                        mmio = BSD_HWS_PGA_GEN7;
                        break;
                }
@@ -1212,7 +1210,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
 
 static const struct intel_ring_buffer render_ring = {
        .name                   = "render ring",
-       .id                     = RING_RENDER,
+       .id                     = RCS,
        .mmio_base              = RENDER_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_render_ring,
@@ -1235,7 +1233,7 @@ static const struct intel_ring_buffer render_ring = {
 
 static const struct intel_ring_buffer bsd_ring = {
        .name                   = "bsd ring",
-       .id                     = RING_BSD,
+       .id                     = VCS,
        .mmio_base              = BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_ring_common,
@@ -1345,7 +1343,7 @@ gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
        .name                   = "gen6 bsd ring",
-       .id                     = RING_BSD,
+       .id                     = VCS,
        .mmio_base              = GEN6_BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_ring_common,
@@ -1381,79 +1379,13 @@ blt_ring_put_irq(struct intel_ring_buffer *ring)
                          GEN6_BLITTER_USER_INTERRUPT);
 }
 
-
-/* Workaround for some stepping of SNB,
- * each time when BLT engine ring tail moved,
- * the first command in the ring to be parsed
- * should be MI_BATCH_BUFFER_START
- */
-#define NEED_BLT_WORKAROUND(dev) \
-       (IS_GEN6(dev) && (dev->pdev->revision < 8))
-
-static inline struct drm_i915_gem_object *
-to_blt_workaround(struct intel_ring_buffer *ring)
-{
-       return ring->private;
-}
-
-static int blt_ring_init(struct intel_ring_buffer *ring)
-{
-       if (NEED_BLT_WORKAROUND(ring->dev)) {
-               struct drm_i915_gem_object *obj;
-               u32 *ptr;
-               int ret;
-
-               obj = i915_gem_alloc_object(ring->dev, 4096);
-               if (obj == NULL)
-                       return -ENOMEM;
-
-               ret = i915_gem_object_pin(obj, 4096, true);
-               if (ret) {
-                       drm_gem_object_unreference(&obj->base);
-                       return ret;
-               }
-
-               ptr = kmap(obj->pages[0]);
-               *ptr++ = MI_BATCH_BUFFER_END;
-               *ptr++ = MI_NOOP;
-               kunmap(obj->pages[0]);
-
-               ret = i915_gem_object_set_to_gtt_domain(obj, false);
-               if (ret) {
-                       i915_gem_object_unpin(obj);
-                       drm_gem_object_unreference(&obj->base);
-                       return ret;
-               }
-
-               ring->private = obj;
-       }
-
-       return init_ring_common(ring);
-}
-
-static int blt_ring_begin(struct intel_ring_buffer *ring,
-                         int num_dwords)
-{
-       if (ring->private) {
-               int ret = intel_ring_begin(ring, num_dwords+2);
-               if (ret)
-                       return ret;
-
-               intel_ring_emit(ring, MI_BATCH_BUFFER_START);
-               intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
-
-               return 0;
-       } else
-               return intel_ring_begin(ring, 4);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate, u32 flush)
 {
        uint32_t cmd;
        int ret;
 
-       ret = blt_ring_begin(ring, 4);
+       ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;
 
@@ -1468,22 +1400,12 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static void blt_ring_cleanup(struct intel_ring_buffer *ring)
-{
-       if (!ring->private)
-               return;
-
-       i915_gem_object_unpin(ring->private);
-       drm_gem_object_unreference(ring->private);
-       ring->private = NULL;
-}
-
 static const struct intel_ring_buffer gen6_blt_ring = {
        .name                   = "blt ring",
-       .id                     = RING_BLT,
+       .id                     = BCS,
        .mmio_base              = BLT_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
-       .init                   = blt_ring_init,
+       .init                   = init_ring_common,
        .write_tail             = ring_write_tail,
        .flush                  = blt_ring_flush,
        .add_request            = gen6_add_request,
@@ -1491,7 +1413,6 @@ static const struct intel_ring_buffer gen6_blt_ring = {
        .irq_get                = blt_ring_get_irq,
        .irq_put                = blt_ring_put_irq,
        .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
-       .cleanup                = blt_ring_cleanup,
        .sync_to                = gen6_blt_ring_sync_to,
        .semaphore_register     = {MI_SEMAPHORE_SYNC_BR,
                                   MI_SEMAPHORE_SYNC_BV,
index 68281c96c558c9b3c9ec8a8624080c70c3ff5213..c8b9cc0cd0dc438eb248ee6421250b1313fa9d14 100644 (file)
@@ -1,13 +1,6 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
-enum {
-       RCS = 0x0,
-       VCS,
-       BCS,
-       I915_NUM_RINGS,
-};
-
 struct  intel_hw_status_page {
        u32     __iomem *page_addr;
        unsigned int    gfx_addr;
@@ -36,10 +29,11 @@ struct  intel_hw_status_page {
 struct  intel_ring_buffer {
        const char      *name;
        enum intel_ring_id {
-               RING_RENDER = 0x1,
-               RING_BSD = 0x2,
-               RING_BLT = 0x4,
+               RCS = 0x0,
+               VCS,
+               BCS,
        } id;
+#define I915_NUM_RINGS 3
        u32             mmio_base;
        void            __iomem *virtual_start;
        struct          drm_device *dev;
@@ -119,6 +113,12 @@ struct  intel_ring_buffer {
        void *private;
 };
 
+static inline unsigned
+intel_ring_flag(struct intel_ring_buffer *ring)
+{
+       return 1 << ring->id;
+}
+
 static inline u32
 intel_ring_sync_index(struct intel_ring_buffer *ring,
                      struct intel_ring_buffer *other)
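
The old enum made each ring id a distinct bit (0x1/0x2/0x4); the new ids are dense (0, 1, 2) so they can index dev_priv->ring[] and per-ring register arrays like RING_FAULT_REG() directly, while intel_ring_flag() recovers a single-bit mask as 1 << id wherever a bitmask is still wanted. A standalone illustration:

#include <stdio.h>

enum ring_id { RCS = 0, VCS, BCS };
#define NUM_RINGS 3

static unsigned int ring_flag(enum ring_id id)
{
        return 1u << id;        /* dense index -> one-hot mask */
}

int main(void)
{
        const char *names[NUM_RINGS] = { "render", "bsd", "blt" };

        for (int id = RCS; id < NUM_RINGS; id++)
                printf("%-6s index=%d flag=0x%x\n",
                       names[id], id, ring_flag((enum ring_id)id));
        return 0;
}

The printed flags (0x1, 0x2, 0x4) are exactly the old RING_RENDER/RING_BSD/RING_BLT values, so callers that relied on the bitmask keep working.
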
index 2288abf88cce4e3420bbedc379747480aa8843e8..98444ab68bc367243f6cc0c593e39bbbe1fdad76 100644 (file)
@@ -501,7 +501,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                        intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
                        mutex_lock(&dev->struct_mutex);
                }
-               i915_gem_object_unpin(old_obj);
+               intel_unpin_fb_obj(old_obj);
        }
 
 out_unlock:
@@ -528,7 +528,7 @@ intel_disable_plane(struct drm_plane *plane)
                goto out;
 
        mutex_lock(&dev->struct_mutex);
-       i915_gem_object_unpin(intel_plane->obj);
+       intel_unpin_fb_obj(intel_plane->obj);
        intel_plane->obj = NULL;
        mutex_unlock(&dev->struct_mutex);
 out:
index 924f6a454fed97d9b84dbbbbdfc32751b82759db..da929bb5b7886e3387cd130ab3b0ee5aac2abb71 100644 (file)
@@ -296,6 +296,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_CONSTANTS   14
 #define I915_PARAM_HAS_RELAXED_DELTA    15
 #define I915_PARAM_HAS_GEN7_SOL_RESET   16
+#define I915_PARAM_HAS_LLC              17
 
 typedef struct drm_i915_getparam {
        int param;
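
User space discovers the new I915_PARAM_HAS_LLC through the long-standing GETPARAM ioctl whose parameter list is being extended here. A hedged sketch of the query (standard drm/i915 uapi headers assumed; the device path may differ on a given system):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR);
        int has_llc = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_HAS_LLC,
                .value = &has_llc,
        };

        if (fd >= 0 && ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
                printf("HAS_LLC: %d\n", has_llc);  /* CPU/GPU shared LLC? */
        return 0;
}
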
index b174620cc9b3a7a228234e56b9355b92a02cb479..0a0001b9dc7853e5fb0d84b1e3b6950429afe7f8 100644 (file)
@@ -15,6 +15,10 @@ const struct intel_gtt {
        unsigned int needs_dmar : 1;
        /* Whether we idle the gpu before mapping/unmapping */
        unsigned int do_idle_maps : 1;
+       /* Share the scratch page dma with ppgtts. */
+       dma_addr_t scratch_page_dma;
+       /* for ppgtt PDE access */
+       u32 __iomem *gtt;
 } *intel_gtt_get(void);
 
 void intel_gtt_chipset_flush(void);
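
Exporting scratch_page_dma (plus an ioremapped gtt pointer for PDE writes) from intel-gtt lets the new per-process GTT code point unused page-table entries at the same scratch page the global GTT already uses, instead of allocating a second one. A sketch of how a ppgtt clear path might consume it, in kernel idiom (function and parameter names are hypothetical; the PTE macros are the ones added to i915_reg.h above):

/* sketch only, not the driver's actual function */
static void ppgtt_clear_range(u32 __iomem *pt_base, unsigned int first,
                              unsigned int count, dma_addr_t scratch_dma)
{
        u32 scratch_pte = GEN6_PTE_ADDR_ENCODE(scratch_dma) | GEN6_PTE_VALID;
        unsigned int i;

        /* every cleared entry points at the shared scratch page, so a
         * stray GPU access hits harmless memory instead of faulting */
        for (i = first; i < first + count; i++)
                writel(scratch_pte, pt_base + i);
}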