drm/i915: make mappable struct resource centric
author    Matthew Auld <matthew.auld@intel.com>
          Tue, 11 Jun 2019 08:33:00 +0000 (10:33 +0200)
committer Kleber Sacilotto de Souza <kleber.souza@canonical.com>
          Mon, 24 Jun 2019 14:21:33 +0000 (16:21 +0200)
Now that we are using struct resource to track the stolen region, it is
more convenient if we track the mappable region in a resource as well.

v2: prefer iomap and gmadr naming scheme
    prefer DEFINE_RES_MEM
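
    [Editor's note: a minimal, out-of-tree sketch of the pattern this patch
    applies; the struct, field and function names below are illustrative and
    not the driver's own. DEFINE_RES_MEM() turns a (base, size) pair into an
    IORESOURCE_MEM resource, so the old mappable_base becomes gmadr.start and
    the length is read back with resource_size().]

    #include <linux/ioport.h>	/* struct resource, DEFINE_RES_MEM, resource_size() */
    #include <linux/types.h>	/* phys_addr_t, resource_size_t, u64 */

    struct example_ggtt {
    	struct resource gmadr;	/* aperture described as a memory resource */
    	u64 mappable_end;	/* size of the CPU-mappable range */
    };

    static void example_setup_gmadr(struct example_ggtt *ggtt,
    				phys_addr_t base, resource_size_t len)
    {
    	/* DEFINE_RES_MEM(start, size) expands to an IORESOURCE_MEM
    	 * initialiser covering [start, start + size - 1]. */
    	ggtt->gmadr = (struct resource) DEFINE_RES_MEM(base, len);

    	/* The old mappable_base is now ggtt->gmadr.start; the length is
    	 * recovered with resource_size(). */
    	ggtt->mappable_end = resource_size(&ggtt->gmadr);
    }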

CVE-2019-11085

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171211151822.20953-8-matthew.auld@intel.com
(backported from commit 73ebd503034c1abe31137df02dd4493eb7a522d4)
Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Timo Aaltonen <tjaalton@ubuntu.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
12 files changed:
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/i915/selftests/mock_gtt.c

drivers/gpu/drm/i915/gvt/gvt.h
index 9c2e7c0aa38fb68670a26789d4efcf58aa42b607..33af72acb6b2d2d73f32d1404c2fc5b5c3d08eea 100644
@@ -316,7 +316,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 
 /* Aperture/GM space definitions for GVT device */
 #define gvt_aperture_sz(gvt)     (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
 
 #define gvt_ggtt_gm_sz(gvt)      (gvt->dev_priv->ggtt.base.total)
 #define gvt_ggtt_sz(gvt) \
drivers/gpu/drm/i915/i915_drv.c
index 10ff0e2ffb8d008d3ff656f98f01d1cf25404c6b..40e3c3da5207e79ecd0b1b2b8de06b166038f00a 100644
@@ -724,7 +724,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
        if (!ap)
                return -ENOMEM;
 
-       ap->ranges[0].base = ggtt->mappable_base;
+       ap->ranges[0].base = ggtt->gmadr.start;
        ap->ranges[0].size = ggtt->mappable_end;
 
        primary =
drivers/gpu/drm/i915/i915_gem.c
index 5cfba89ed586391409cf02459344b668c9c181bc..2945232cd29a36ed39071660ac3593544876e586 100644
@@ -1099,7 +1099,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                        page_base += offset & PAGE_MASK;
                }
 
-               if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+               if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
@@ -1307,7 +1307,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
-               if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+               if (ggtt_write(&ggtt->iomap, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
@@ -1936,9 +1936,9 @@ int i915_gem_fault(struct vm_fault *vmf)
        /* Finally, remap it using the new GTT offset */
        ret = remap_io_mapping(area,
                               area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
-                              (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+                              (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
                               min_t(u64, vma->size, area->vm_end - area->vm_start),
-                              &ggtt->mappable);
+                              &ggtt->iomap);
        if (ret)
                goto err_fence;
 
drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d26aea4352c1cd6e3eef6d0db712d79d4158829a..0d7c1e19a4e32f761a2dc82448bd9753bdfc510d 100644
@@ -1014,7 +1014,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                offset += page << PAGE_SHIFT;
        }
 
-       vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
+       vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
                                                         offset);
        cache->page = page;
        cache->vaddr = (unsigned long)vaddr;
drivers/gpu/drm/i915/i915_gem_gtt.c
index 2af65ecf2df84e8a26fe694f8e115d9e8e7d3671..5dc06c6e532666d2852baef0816d906c6c46c8a6 100644
@@ -2877,7 +2877,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        arch_phys_wc_del(ggtt->mtrr);
-       io_mapping_fini(&ggtt->mappable);
+       io_mapping_fini(&ggtt->iomap);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3303,8 +3303,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        int err;
 
        /* TODO: We're not aware of mappable constraints on gen8 yet */
-       ggtt->mappable_base = pci_resource_start(pdev, 2);
-       ggtt->mappable_end = pci_resource_len(pdev, 2);
+       ggtt->gmadr =
+               (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+                                                pci_resource_len(pdev, 2));
+       ggtt->mappable_end = resource_size(&ggtt->gmadr);
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
        if (!err)
@@ -3361,8 +3363,10 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
        u16 snb_gmch_ctl;
        int err;
 
-       ggtt->mappable_base = pci_resource_start(pdev, 2);
-       ggtt->mappable_end = pci_resource_len(pdev, 2);
+       ggtt->gmadr =
+               (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+                                                pci_resource_len(pdev, 2));
+       ggtt->mappable_end = resource_size(&ggtt->gmadr);
 
        /* 64/512MB is the current min/max we actually know of, but this is just
         * a coarse sanity check.
@@ -3417,6 +3421,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
        struct drm_i915_private *dev_priv = ggtt->base.i915;
+       phys_addr_t gmadr_base;
        int ret;
 
        ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3427,9 +3432,13 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 
        intel_gtt_get(&ggtt->base.total,
                      &ggtt->stolen_size,
-                     &ggtt->mappable_base,
+                     &gmadr_base,
                      &ggtt->mappable_end);
 
+       ggtt->gmadr =
+               (struct resource) DEFINE_RES_MEM(gmadr_base,
+                                                ggtt->mappable_end);
+
        ggtt->do_idle_maps = needs_idle_maps(dev_priv);
        ggtt->base.insert_page = i915_ggtt_insert_page;
        ggtt->base.insert_entries = i915_ggtt_insert_entries;
@@ -3497,7 +3506,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %lluM\n",
                 ggtt->base.total >> 20);
-       DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+       DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
        if (intel_vtd_active())
                DRM_INFO("VT-d active for gfx access\n");
@@ -3527,14 +3536,14 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
                ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
-                               dev_priv->ggtt.mappable_base,
+       if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
+                               dev_priv->ggtt.gmadr.start,
                                dev_priv->ggtt.mappable_end)) {
                ret = -EIO;
                goto out_gtt_cleanup;
        }
 
-       ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
+       ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
 
        /*
         * Initialise stolen early so that we may reserve preallocated
drivers/gpu/drm/i915/i915_gem_gtt.h
index 93211a96fdadd2ec10962f6fac34a160ad311433..86a494d9c5ab2ec84f28e2d65589f324f8c1ce02 100644
@@ -368,9 +368,9 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
  */
 struct i915_ggtt {
        struct i915_address_space base;
-       struct io_mapping mappable;     /* Mapping to our CPU mappable region */
 
-       phys_addr_t mappable_base;      /* PA of our GMADR */
+       struct io_mapping iomap;        /* Mapping to our CPU mappable region */
+       struct resource gmadr;          /* GMADR resource */
        u64 mappable_end;               /* End offset that we can CPU map */
 
        /* Stolen memory is segmented in hardware with different portions
drivers/gpu/drm/i915/i915_gpu_error.c
index 653fb69e7ecb0ab038069681f72d5f5c32b1e220..5f85ea4beb4e89afe0f83f772bd8287bdae7c3af 100644
@@ -912,7 +912,7 @@ i915_error_object_create(struct drm_i915_private *i915,
                ggtt->base.insert_page(&ggtt->base, dma, slot,
                                       I915_CACHE_NONE, 0);
 
-               s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+               s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
                ret = compress_page(&compress, (void  __force *)s, dst);
                io_mapping_unmap_atomic(s);
 
drivers/gpu/drm/i915/i915_vma.c
index fbfab2f3302326cafdfbd7443e76970ab68f5c7a..1f14c76b5e35cb705b2d1f3eebbc8c3d42f01adb 100644
@@ -305,7 +305,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 
        ptr = vma->iomap;
        if (ptr == NULL) {
-               ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+               ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL) {
drivers/gpu/drm/i915/intel_display.c
index 8544de5ba687a23ae0a84be6cd5325fe7bdbcbc2..aebd04f30046c44e8dbfd466aa6f0b922a684c2a 100644
@@ -14565,7 +14565,7 @@ int intel_modeset_init(struct drm_device *dev)
                dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
        }
 
-       dev->mode_config.fb_base = ggtt->mappable_base;
+       dev->mode_config.fb_base = ggtt->gmadr.start;
 
        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_INFO(dev_priv)->num_pipes,
drivers/gpu/drm/i915/intel_overlay.c
index 1b397b41cb4fc17efad5aeeafab1d9e8c9cd05e9..41e9465d44a8128c0463d024be40066212819d79 100644
@@ -219,7 +219,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
        if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
+               regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
                                         overlay->flip_addr,
                                         PAGE_SIZE);
 
@@ -1508,7 +1508,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
                regs = (struct overlay_registers __iomem *)
                        overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
+               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
                                                overlay->flip_addr);
 
        return regs;
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 9da0c9f999167a62821a6c05ff7cdf831204426e..abfbafa765fc1059c52aa1f106721026af4a6d88 100644
@@ -1055,7 +1055,7 @@ static int igt_ggtt_page(void *arg)
                                       i915_gem_object_get_dma_address(obj, 0),
                                       offset, I915_CACHE_NONE, 0);
 
-               vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+               vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
 
@@ -1073,7 +1073,7 @@ static int igt_ggtt_page(void *arg)
                                       i915_gem_object_get_dma_address(obj, 0),
                                       offset, I915_CACHE_NONE, 0);
 
-               vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+               vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);
 
drivers/gpu/drm/i915/selftests/mock_gtt.c
index 336e1afb250f68147da22be96e8d9b87a3e76595..e96873f96116ecddabfac25b1804b39be9daf2b4 100644
@@ -110,8 +110,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
 
        ggtt->base.i915 = i915;
 
-       ggtt->mappable_base = 0;
-       ggtt->mappable_end = 2048 * PAGE_SIZE;
+       ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
+       ggtt->mappable_end = resource_size(&ggtt->gmadr);
        ggtt->base.total = 4096 * PAGE_SIZE;
 
        ggtt->base.clear_range = nop_clear_range;